| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
if len(__A ) != len(__A ):
raise ValueError('''String lengths must match!''' )
lowercase__ : List[Any] = 0
for chara, chara in zip(__A , __A ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| code_codestyle: 560 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
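# Illustrative example (not part of the original test): floats_list((2, 3))
# returns a batch-shaped nested list such as
#   [[0.13, 0.84, 0.41], [0.77, 0.02, 0.55]]
# with values drawn uniformly from [0, 1) and multiplied by `scale`.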
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowerCamelCase , "spectrogram_length" ) )
self.assertTrue(hasattr(__lowerCamelCase , "feature_size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "num_audio_channels" ) )
self.assertTrue(hasattr(__lowerCamelCase , "hop_length" ) )
self.assertTrue(hasattr(__lowerCamelCase , "chunk_length" ) )
self.assertTrue(hasattr(__lowerCamelCase , "sampling_rate" ) )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(__lowerCamelCase )[0]
check_json_file_has_correct_format(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
_SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
_SCREAMING_SNAKE_CASE = dict_first.pop("mel_filters" )
_SCREAMING_SNAKE_CASE = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
_SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
_SCREAMING_SNAKE_CASE = dict_first.pop("mel_filters" )
_SCREAMING_SNAKE_CASE = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
# Initialize feature_extractor
_SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_SCREAMING_SNAKE_CASE = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
_SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_SCREAMING_SNAKE_CASE = feature_extractor(__lowerCamelCase , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_SCREAMING_SNAKE_CASE = feature_extractor(
__lowerCamelCase , return_tensors="np" , sampling_rate=4_4_1_0_0 , mask_audio=__lowerCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_SCREAMING_SNAKE_CASE = np.asarray(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = feature_extractor(__lowerCamelCase , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCAmelCase_ ( self : int , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_SCREAMING_SNAKE_CASE = ds.sort("id" ).select(range(__lowerCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
_SCREAMING_SNAKE_CASE = TvltFeatureExtractor()
_SCREAMING_SNAKE_CASE = feature_extractor(__lowerCamelCase , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_SCREAMING_SNAKE_CASE = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowerCamelCase , atol=1e-4 ) )
| style_context_codestyle: 418 | label: 0 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if the sentence contains every letter of the English alphabet."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a fixed-size flag list indexed by letter position."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a set comprehension over the alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three pangram implementations."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
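# Illustrative behaviour of the three checks (not part of the original file):
#   is_pangram()                                                    -> True
#   is_pangram_faster("hello world")                                -> False
#   is_pangram_fastest("Pack my box with five dozen liquor jugs")   -> True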
| code_codestyle: 351 |
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon search results for `product` and collect them in a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount in percent: (MRP - current price) / MRP * 100
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # Store the collected fields as the next row. The scrambled dump hides the
        # exact left-hand sides here, so a standard append-by-position idiom is
        # assumed; the two " " assignments that follow in the source are kept as
        # placeholders because their DataFrame targets are not recoverable.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        _ = " "
        _ = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
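# Note (added commentary): zip_longest pads the shorter find_all() result list
# with None, so `item` may be None on trailing iterations; the outer
# try/except AttributeError is what keeps those iterations from raising.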
| style_context_codestyle: 351 | label: 1 |
"""Utilities for comparing installed package versions against pip-style requirements."""
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version

ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter version
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
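# Illustrative usage (these particular requirement strings are examples, not from
# the original file):
#   require_version("numpy>=1.17,<2.0")    # range with multiple constraints
#   require_version("python>=3.8")         # special-cased against sys.version_info
#   require_version_core("datasets>=2.0")  # adds the transformers-specific hint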
| code_codestyle: 102 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
if isinstance(UpperCamelCase__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class _UpperCAmelCase :
'''simple docstring'''
def __lowerCAmelCase ( self , A , A ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
def __lowerCAmelCase ( self ) -> str:
pass
def __lowerCAmelCase ( self , A , A , A , A , A=None , **A ) -> List[Any]:
_UpperCAmelCase : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(A , A )
_UpperCAmelCase : Dict = TFVisionTextDualEncoderModel(A )
_UpperCAmelCase : List[str] = model(input_ids=A , pixel_values=A , attention_mask=A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def __lowerCAmelCase ( self , A , A , A , A , A=None , **A ) -> str:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.get_vision_text_model(A , A )
_UpperCAmelCase : int = TFVisionTextDualEncoderModel(vision_model=A , text_model=A )
_UpperCAmelCase : Optional[int] = model(input_ids=A , pixel_values=A , attention_mask=A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase ( self , A , A , A , A , A=None , **A ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase : int = self.get_vision_text_model(A , A )
_UpperCAmelCase : str = {'''vision_model''': vision_model, '''text_model''': text_model}
_UpperCAmelCase : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**A )
_UpperCAmelCase : Dict = model(input_ids=A , pixel_values=A , attention_mask=A )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase ( self , A , A , A , A , A=None , **A ) -> List[str]:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_vision_text_model(A , A )
_UpperCAmelCase : str = TFVisionTextDualEncoderModel(vision_model=A , text_model=A )
_UpperCAmelCase : List[str] = model(input_ids=A , pixel_values=A , attention_mask=A )
_UpperCAmelCase : Optional[Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_UpperCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(A )
_UpperCAmelCase : Tuple = model(input_ids=A , pixel_values=A , attention_mask=A )
_UpperCAmelCase : Union[str, Any] = after_output[0].numpy()
_UpperCAmelCase : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A , 1E-5 )
def __lowerCAmelCase ( self , A , A , A , A , A=None , **A ) -> int:
_UpperCAmelCase , _UpperCAmelCase : str = self.get_vision_text_model(A , A )
_UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=A , text_model=A )
_UpperCAmelCase : List[Any] = model(
input_ids=A , pixel_values=A , attention_mask=A , output_attentions=A )
_UpperCAmelCase : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : Any = to_atuple(vision_model.config.image_size )
_UpperCAmelCase : Tuple = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase : Optional[int] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase : str = output.text_model_output.attentions
self.assertEqual(len(A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCAmelCase ( self , A , A , A ) -> Any:
_UpperCAmelCase : int = np.abs((a - b) ).max()
self.assertLessEqual(A , A , f'Difference between torch and flax is {diff} (>= {tol}).' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : str = self.prepare_config_and_inputs()
self.check_save_load(**A )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.get_pretrained_model_and_inputs()
_UpperCAmelCase : int = model_a(**A )
_UpperCAmelCase : Tuple = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A )
_UpperCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(A )
_UpperCAmelCase : Optional[int] = model_a(**A )
_UpperCAmelCase : Any = after_outputs[0].numpy()
_UpperCAmelCase : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A , 1E-5 )
@require_tf
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
_UpperCAmelCase : Tuple = 1_3
_UpperCAmelCase : Dict = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase : Union[str, Any] = random_attention_mask([batch_size, 4] )
_UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowerCAmelCase ( self , A , A ) -> Union[str, Any]:
_UpperCAmelCase : int = TFViTModel(A , name='''vision_model''' )
_UpperCAmelCase : str = TFBertModel(A , name='''text_model''' )
return vision_model, text_model
def __lowerCAmelCase ( self ) -> Optional[Any]:
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Optional[Any]:
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
_UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
_UpperCAmelCase : List[str] = 1_3
_UpperCAmelCase : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase : int = random_attention_mask([batch_size, 4] )
_UpperCAmelCase : List[str] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowerCAmelCase ( self , A , A , A , A , A=None , **A ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_vision_text_model(A , A )
_UpperCAmelCase : Dict = TFVisionTextDualEncoderModel(vision_model=A , text_model=A )
_UpperCAmelCase : Tuple = model(
input_ids=A , pixel_values=A , attention_mask=A , output_attentions=A )
_UpperCAmelCase : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(A ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase : Optional[int] = to_atuple(vision_model.config.image_size )
_UpperCAmelCase : List[str] = to_atuple(vision_model.config.patch_size )
_UpperCAmelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_UpperCAmelCase : List[Any] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_UpperCAmelCase : Any = output.text_model_output.attentions
self.assertEqual(len(A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCAmelCase ( self , A , A ) -> Dict:
_UpperCAmelCase : Optional[int] = TFDeiTModel(A , name='''vision_model''' )
_UpperCAmelCase : Union[str, Any] = TFRobertaModel(A , name='''text_model''' )
return vision_model, text_model
def __lowerCAmelCase ( self ) -> Dict:
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' )
_UpperCAmelCase : Optional[int] = 1_3
_UpperCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_UpperCAmelCase : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_UpperCAmelCase : Any = random_attention_mask([batch_size, 4] )
_UpperCAmelCase : List[str] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowerCAmelCase ( self , A , A ) -> Optional[int]:
_UpperCAmelCase : int = TFCLIPVisionModel(A , name='''vision_model''' )
_UpperCAmelCase : List[str] = TFBertModel(A , name='''text_model''' )
return vision_model, text_model
def __lowerCAmelCase ( self ) -> Union[str, Any]:
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=A )
_UpperCAmelCase : Tuple = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
_UpperCAmelCase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_UpperCAmelCase : List[Any] = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=A , padding=A , return_tensors='''np''' )
_UpperCAmelCase : Union[str, Any] = model(**A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_UpperCAmelCase : Optional[int] = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , A , atol=1E-3 ) )
| style_context_codestyle: 506 | label: 0 |
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # Position of the parent node in a binary heap stored as a list
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # Position of the left child node
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # Position of the right child node
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap with a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Append the element and restore the heap property upwards
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with the smallest weight
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Change the weight of an existing element and re-heapify around it
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Move a node up until its parent is no heavier (internal use only)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Move a node down until both children are no lighter (internal use only)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap two heap entries and keep the position map consistent
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency mapping."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node to the graph if it is not there yet
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an undirected edge with the given weight between two nodes
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Compute a minimum spanning tree with Prim's algorithm.

    Returns the distance map and the parent map that describes the tree.
    """
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
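# Illustrative usage (assuming the reconstructed names above; not part of the
# original file):
#   graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)
#   # parent encodes one minimum spanning tree, e.g. {"a": None, "b": "a", "c": "b"}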
| code_codestyle: 703 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**__magic_name__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_lowerCAmelCase = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**__magic_name__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_lowerCAmelCase = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__magic_name__ )
# warmup pass to apply optimizations
_lowerCAmelCase = pipe(**self.get_dummy_inputs() )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**__magic_name__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_lowerCAmelCase = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**__magic_name__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_lowerCAmelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**__magic_name__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_lowerCAmelCase = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**__magic_name__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
_lowerCAmelCase = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = ort.SessionOptions()
_lowerCAmelCase = False
return options
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_lowerCAmelCase = init_image.resize((7_6_8, 5_1_2) )
# using the PNDM scheduler by default
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = 'A fantasy landscape, trending on artstation'
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=__magic_name__ , image=__magic_name__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__magic_name__ , output_type='np' , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
_lowerCAmelCase = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_lowerCAmelCase = init_image.resize((7_6_8, 5_1_2) )
_lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
_lowerCAmelCase = 'A fantasy landscape, trending on artstation'
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=__magic_name__ , image=__magic_name__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__magic_name__ , output_type='np' , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 7_6_8, 3)
_lowerCAmelCase = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| style_context_codestyle: 309 | label: 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowercase )
SCREAMING_SNAKE_CASE__ : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowercase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_lowercase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE__ : List[str] = self.num_queries
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE__ : int = self.num_channels
SCREAMING_SNAKE_CASE__ : List[str] = 64
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_28
SCREAMING_SNAKE_CASE__ : int = self.hidden_dim
SCREAMING_SNAKE_CASE__ : int = self.hidden_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = self.hidden_dim
return config
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] , _lowercase : int , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ : Any = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , config.decoder_layers )
def lowercase__ ( self : str , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaskaFormerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(pixel_values=_lowercase , pixel_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_lowercase , output_hidden_states=_lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowercase , _lowercase )
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Any = MaskaFormerForUniversalSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
def comm_check_on_output(_lowercase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(pixel_values=_lowercase , pixel_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
comm_check_on_output(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = model(
pixel_values=_lowercase , pixel_mask=_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
comm_check_on_output(_lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowercase__ ( self : str ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowercase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase__ ( self : int ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase__ ( self : List[Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase__ ( self : Optional[int] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase__ ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@slow
def lowercase__ ( self : Dict ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskaFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[str] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowercase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowercase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowercase ).long(),
}
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.get_config()
SCREAMING_SNAKE_CASE__ : List[str] = MaskaFormerForUniversalSegmentation(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase , output_attentions=_lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : Optional[int] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : str = model_class(_lowercase )
model.to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase ).to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a_ :Optional[int] = 1e-4
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Tuple ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase__ ( self : Any ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE__ : int = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE__ : str = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Tuple = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Tuple = inputs['''pixel_values'''].to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = [el.to(_lowercase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ : List[Any] = [el.to(_lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
| 35
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35
| 1
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("""String must only contain alphabetic characters.""" )
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
a :str = input("Enter a string ").strip()
a :List[Any] = is_isogram(input_str)
print(f'{input_str} is {"an" if isogram else "not an"} isogram.')
| 12
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # track R(k) modulo divisor; the loop stops once a repunit is divisible
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 100_0000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'{solution() = }')
| 12
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_ = TypeVar('KEY')
SCREAMING_SNAKE_CASE_ = TypeVar('VAL')
@dataclass(frozen=True , slots=True )
class _Item ( Generic[KEY, VAL] ):
"""simple docstring"""
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class _DeletedItem ( _Item ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
        super().__init__(None , None )
def __bool__( self ):
'''simple docstring'''
return False
_deleted = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , snake_case_ = 8 , snake_case_ = 0.7_5 ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = initial_block_size
__UpperCAmelCase: list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCAmelCase: int = capacity_factor
__UpperCAmelCase: Union[str, Any] = 0
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
return hash(snake_case_ ) % len(self._buckets )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def lowercase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
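        # place the item in this bucket if it is empty or already holds the same key; otherwise report a collision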
__UpperCAmelCase: Optional[int] = self._buckets[ind]
if not stored:
__UpperCAmelCase: List[str] = _Item(snake_case_ , snake_case_ )
self._len += 1
return True
elif stored.key == key:
__UpperCAmelCase: int = _Item(snake_case_ , snake_case_ )
return True
else:
return False
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCAmelCase: Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
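        # rehash every stored item into a fresh bucket list of the requested size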
__UpperCAmelCase: Optional[Any] = self._buckets
__UpperCAmelCase: Tuple = [None] * new_size
__UpperCAmelCase: int = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowercase_ ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def lowercase_ ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def lowercase_ ( self , snake_case_ ):
'''simple docstring'''
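        # linear probing: yield candidate bucket indices starting from the key's hash slot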
__UpperCAmelCase: int = self._get_bucket_index(snake_case_ )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCAmelCase: Optional[Any] = self._get_next_ind(snake_case_ )
def lowercase_ ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
for ind in self._iterate_buckets(snake_case_ ):
if self._try_set(snake_case_ , snake_case_ , snake_case_ ):
break
def __setitem__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(snake_case_ , snake_case_ )
def __delitem__( self , snake_case_ ):
'''simple docstring'''
for ind in self._iterate_buckets(snake_case_ ):
__UpperCAmelCase: int = self._buckets[ind]
if item is None:
raise KeyError(snake_case_ )
if item is _deleted:
continue
if item.key == key:
__UpperCAmelCase: str = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , snake_case_ ):
'''simple docstring'''
for ind in self._iterate_buckets(snake_case_ ):
__UpperCAmelCase: Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(snake_case_ )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = """ ,""".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 523
|
'''simple docstring'''
from math import pi
def UpperCamelCase__ ( _lowercase : int , _lowercase : int ) -> float:
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 523
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 542
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 542
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase : Tuple = logging.get_logger(__name__)
lowercase : int = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __lowercase ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase_ : Dict = '''swin'''
UpperCAmelCase_ : Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __UpperCAmelCase=2_24 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
A : Union[str, Any] = image_size
A : List[str] = patch_size
A : Dict = num_channels
A : Tuple = embed_dim
A : Dict = depths
A : Tuple = len(__UpperCAmelCase )
A : List[str] = num_heads
A : Union[str, Any] = window_size
A : Tuple = mlp_ratio
A : Optional[int] = qkv_bias
A : List[Any] = hidden_dropout_prob
A : Union[str, Any] = attention_probs_dropout_prob
A : Optional[Any] = drop_path_rate
A : List[Any] = hidden_act
A : Any = use_absolute_embeddings
A : List[str] = layer_norm_eps
A : int = initializer_range
A : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A : str = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
A : Dict = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
A , A : List[Any] = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
class __lowercase ( OnnxConfig ):
"""simple docstring"""
UpperCAmelCase_ : Dict = version.parse('''1.11''' )
@property
def snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case ( self ) -> float:
return 1E-4
| 542
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
super().__init__()
self.register_modules(vqvae=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 50 , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]:
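        # sample random latents, denoise them step by step with the UNet, then decode the result with the VQ-VAE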
A : List[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__UpperCAmelCase , )
A : List[Any] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A : Optional[int] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__UpperCAmelCase )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
A : str = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A : List[str] = {}
if accepts_eta:
A : Optional[Any] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
A : Any = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
# predict the noise residual
A : Dict = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
A : int = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
# decode the image latents with the VAE
A : str = self.vqvae.decode(__UpperCAmelCase ).sample
A : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
A : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : Union[str, Any] = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 542
| 1
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
A_ = test_results.split(""" """ )
A_ = 0
A_ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A_ = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCAmelCase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = {}
A_ = None
A_ = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""", UpperCAmelCase__ ):
A_ = True
A_ = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
A_ = line
A_ = False
return failures
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = title
A_ = doc_test_results["""time_spent"""].split(""",""" )[0]
A_ = doc_test_results["""success"""]
A_ = doc_test_results["""failures"""]
A_ = self.n_success + self.n_failures
# Failures and success of the modeling tests
A_ = doc_test_results
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = [self._time_spent]
A_ = 0
for time in time_spent:
A_ = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCamelCase__ ) == 1:
A_ = [0, 0, time_parts[0]]
A_ , A_ , A_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
A_ , A_ , A_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f'''{int(UpperCamelCase__ )}h{int(UpperCamelCase__ )}m{int(UpperCamelCase__ )}s'''
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = 40
A_ = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
A_ = """"""
for category, failures in category_failures.items():
if len(UpperCamelCase__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCamelCase__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCamelCase__ )
@staticmethod
def snake_case_ ( ) -> Optional[int]:
'''simple docstring'''
A_ = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(UpperCamelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase__ , )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
A_ = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
A_ = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=UpperCamelCase__ , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = """"""
for key, value in failures.items():
A_ = value[:200] + """ [Truncated]""" if len(UpperCamelCase__ ) > 250 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
A_ = job_name
A_ = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
A_ = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def snake_case_ ( self ) -> int:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
A_ = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
A_ = sorted(self.doc_test_results.items() , key=lambda UpperCamelCase__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
A_ = f'''*Num failures* :{len(job_result["failed"] )} \n'''
A_ = job_result["""failures"""]
A_ = self.get_reply_blocks(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text=UpperCamelCase__ )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f'''Results for {job}''' , blocks=UpperCamelCase__ , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = os.environ["""GITHUB_RUN_ID"""]
A_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
A_ = requests.get(UpperCAmelCase__ ).json()
A_ = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
A_ = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(UpperCAmelCase__ ):
A_ = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""", UpperCAmelCase__ )
return {}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = {}
if os.path.exists(UpperCAmelCase__ ):
A_ = os.listdir(UpperCAmelCase__ )
for file in files:
try:
with open(os.path.join(UpperCAmelCase__, UpperCAmelCase__ ), encoding="""utf-8""" ) as f:
A_ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(UpperCAmelCase__, UpperCAmelCase__ )}.''' ) from e
return _artifact
def UpperCAmelCase__ ( ) -> Optional[Any]:
class A__ :
def __init__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = name
A_ = []
def __str__( self ) -> Dict:
'''simple docstring'''
return self.name
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
A_ = {}
A_ = filter(os.path.isdir, os.listdir() )
for directory in directories:
A_ = directory
if artifact_name not in _available_artifacts:
A_ = Artifact(UpperCAmelCase__ )
_available_artifacts[artifact_name].add_path(UpperCAmelCase__ )
return _available_artifacts
if __name__ == "__main__":
__lowerCamelCase = get_job_links()
__lowerCamelCase = retrieve_available_artifacts()
__lowerCamelCase = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCamelCase = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase = github_actions_job_links.get('''run_doctests''')
__lowerCamelCase = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
__lowerCamelCase = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = handle_test_results(artifact['''stats'''])
__lowerCamelCase = failed
__lowerCamelCase = success
__lowerCamelCase = time_spent[1:-1] + ''', '''
__lowerCamelCase = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
__lowerCamelCase = line.replace('''FAILED ''', '''''')
__lowerCamelCase = line.split()[0].replace('''\n''', '''''')
if "::" in line:
__lowerCamelCase , __lowerCamelCase = line.split('''::''')
else:
__lowerCamelCase , __lowerCamelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCamelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCamelCase = all_failures[test] if test in all_failures else '''N/A'''
__lowerCamelCase = failure
break
__lowerCamelCase = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 667
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( ProcessorMixin ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 667
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
a__ : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ : Optional[Any] = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
a__ : Optional[Any] = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
a__ : Optional[Any] = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class __magic_name__ ( PreTrainedTokenizerFast ):
UpperCamelCase : Tuple = VOCAB_FILES_NAMES
UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__=True , __magic_name__="[UNK]" , __magic_name__="[SEP]" , __magic_name__="[PAD]" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
"""simple docstring"""
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCamelCase__ ) != tokenize_chinese_chars
):
_lowerCAmelCase = getattr(lowerCamelCase__ , normalizer_state.pop('type' ) )
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = strip_accents
_lowerCAmelCase = tokenize_chinese_chars
_lowerCAmelCase = normalizer_class(**lowerCamelCase__ )
_lowerCAmelCase = do_lower_case
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=None ):
"""simple docstring"""
_lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ = None ):
"""simple docstring"""
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ = None ):
"""simple docstring"""
_lowerCAmelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 589
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__A = logging.getLogger(__name__)
def _A ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
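    # replace Linear layers with bitsandbytes 8-bit/4-bit modules, then load the checkpoint weights and dispatch the model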
lowercase__ = bnb_quantization_config.load_in_abit
lowercase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
lowercase__ = []
# custom device map
if isinstance(lowercase__ , lowercase__ ) and len(device_map.keys() ) > 1:
lowercase__ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowercase__ = get_keys_to_not_convert(lowercase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowercase__ )
lowercase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowercase__ = []
lowercase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowercase__ )
# compatibility with peft
lowercase__ = load_in_abit
lowercase__ = load_in_abit
lowercase__ = get_parameter_device(lowercase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
lowercase__ = replace_with_bnb_layers(lowercase__ , lowercase__ , modules_to_not_convert=lowercase__ )
# convert param to the right dtype
lowercase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowercase__ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
lowercase__ = getattr(lowercase__ , lowercase__ , lowercase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowercase__ ):
param.to(lowercase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowercase__ = replace_with_bnb_layers(
lowercase__ , lowercase__ , modules_to_not_convert=lowercase__ )
lowercase__ = get_quantized_model_device_map(
lowercase__ , lowercase__ , lowercase__ , max_memory=lowercase__ , no_split_module_classes=lowercase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase__ = True
lowercase__ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
lowercase__ , lowercase__ , lowercase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase__ , offload_state_dict=lowercase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowercase__ , device_map=lowercase__ , offload_dir=lowercase__ )
def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None ):
if device_map is None:
if torch.cuda.is_available():
lowercase__ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(lowercase__ , lowercase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
lowercase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowercase__ = {}
lowercase__ = special_dtypes
lowercase__ = no_split_module_classes
lowercase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowercase__ = get_balanced_memory(
lowercase__ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowercase__ , **lowercase__ , )
lowercase__ = max_memory
lowercase__ = infer_auto_device_map(lowercase__ , **lowercase__ )
if isinstance(lowercase__ , lowercase__ ):
# check if don't have any quantized module on the cpu
lowercase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowercase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
if modules_to_not_convert is None:
lowercase__ = []
lowercase__ , lowercase__ = _replace_with_bnb_layers(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , ):
lowercase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowercase__ = []
current_key_name.append(lowercase__ )
if isinstance(lowercase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowercase__ = """.""".join(lowercase__ )
lowercase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowercase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowercase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowercase__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowercase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
lowercase__ = module.weight.data
if module.bias is not None:
lowercase__ = module.bias.data
bnb_module.requires_grad_(lowercase__ )
setattr(lowercase__ , lowercase__ , lowercase__ )
lowercase__ = True
if len(list(module.children() ) ) > 0:
lowercase__ , lowercase__ = _replace_with_bnb_layers(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowercase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _A ( lowercase__ ):
# Create a copy of the model
with init_empty_weights():
lowercase__ = deepcopy(lowercase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowercase__ = find_tied_parameters(lowercase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase__ , lowercase__ ):
lowercase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase__ = sum(lowercase__ , [] )
lowercase__ = len(lowercase__ ) > 0
# Check if it is a base model
lowercase__ = False
if hasattr(lowercase__ , """base_model_prefix""" ):
lowercase__ = not hasattr(lowercase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase__ = list(model.named_children() )
lowercase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowercase__ = set(lowercase__ ) - set(lowercase__ )
lowercase__ = list(set(lowercase__ ) ) + list(lowercase__ )
# remove ".weight" from the keys
lowercase__ = [""".weight""", """.bias"""]
lowercase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase__ = name.replace(lowercase__ , """""" )
filtered_module_names.append(lowercase__ )
return filtered_module_names
def _A ( lowercase__ ):
for m in model.modules():
if isinstance(lowercase__ , bnb.nn.Linearabit ):
return True
return False
def _A ( lowercase__ ):
return next(parameter.parameters() ).device
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowercase__ , lowercase__ , 0 , dtype=lowercase__ , value=lowercase__ )
lowercase__ = param_name
lowercase__ = model
if "." in tensor_name:
lowercase__ = tensor_name.split(""".""" )
for split in splits[:-1]:
lowercase__ = getattr(lowercase__ , lowercase__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
lowercase__ = new_module
lowercase__ = splits[-1]
# offload weights
lowercase__ = False
offload_weight(module._parameters[tensor_name] , lowercase__ , lowercase__ , index=lowercase__ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowercase__ , index=lowercase__ , )
else:
offload_weight(lowercase__ , lowercase__ , lowercase__ , index=lowercase__ )
offload_weight(lowercase__ , param_name.replace("""weight""" , """SCB""" ) , lowercase__ , index=lowercase__ )
set_module_tensor_to_device(lowercase__ , lowercase__ , """meta""" , dtype=lowercase__ , value=torch.empty(*param.size() ) )
| 325
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class UpperCamelCase_ ( PretrainedConfig ):
_A : Any = 'big_bird'
def __init__( self , snake_case__=5_03_58 , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu_new" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=40_96 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=True , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=66 , snake_case__="block_sparse" , snake_case__=True , snake_case__=False , snake_case__=64 , snake_case__=3 , snake_case__=None , **snake_case__ , ) -> List[str]:
"""simple docstring"""
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , sep_token_id=snake_case__ , **snake_case__ , )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = use_cache
UpperCAmelCase = rescale_embeddings
UpperCAmelCase = attention_type
UpperCAmelCase = use_bias
UpperCAmelCase = block_size
UpperCAmelCase = num_random_blocks
UpperCAmelCase = classifier_dropout
class UpperCamelCase_ ( OnnxConfig ):
@property
def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 378
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Any = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCamelCase_ ( PretrainedConfig ):
_A : Tuple = 'canine'
def __init__( self , snake_case__=7_68 , snake_case__=12 , snake_case__=12 , snake_case__=30_72 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1_63_84 , snake_case__=16 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=0xE_0_0_0 , snake_case__=0xE_0_0_1 , snake_case__=4 , snake_case__=4 , snake_case__=8 , snake_case__=1_63_84 , snake_case__=1_28 , **snake_case__ , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
# Character config:
UpperCAmelCase = downsampling_rate
UpperCAmelCase = upsampling_kernel_size
UpperCAmelCase = num_hash_functions
UpperCAmelCase = num_hash_buckets
UpperCAmelCase = local_transformer_stride
| 378
| 1
|
from ...processing_utils import ProcessorMixin
class snake_case ( ProcessorMixin ):
lowercase_ = ['image_processor', 'feature_extractor']
lowercase_ = 'TvltImageProcessor'
lowercase_ = 'TvltFeatureExtractor'
def __init__( self : int , a_ : List[str] , a_ : Dict )-> str:
"""simple docstring"""
super().__init__(image_processor=a_ , feature_extractor=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor
SCREAMING_SNAKE_CASE__ : Any = feature_extractor
def __call__( self : Optional[Any] , a_ : int=None , a_ : str=None , a_ : int=None , a_ : Tuple=None , a_ : Tuple=False , a_ : Dict=False , *a_ : Tuple , **a_ : Union[str, Any] , )-> List[str]:
"""simple docstring"""
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor(a_ , mask_pixel=a_ , *a_ , **a_ )
if images_mixed is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processor(a_ , is_mixed=a_ , *a_ , **a_ )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor(
a_ , *a_ , sampling_rate=a_ , mask_audio=a_ , **a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
if audio is not None:
output_dict.update(a_ )
if images is not None:
output_dict.update(a_ )
if images_mixed_dict is not None:
output_dict.update(a_ )
return output_dict
@property
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 85
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
| 1
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 2**power
lowerCAmelCase : Dict = str(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = list(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = 0
for i in list_num:
sum_of_num += int(SCREAMING_SNAKE_CASE )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase__ = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase__ = solution(power)
print('''Sum of the digits is: ''', result)
| 681
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 681
| 1
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Shortest path on a 0/1 grid (1 = walkable cell) with unit step costs."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
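
    # Minimal usage sketch (illustrative, not part of the original module; it assumes
    # the search function above is exposed as `dijkstra`): on a fully walkable 3x3 grid
    # of ones, the shortest path from (0, 0) to (2, 2) without diagonal moves costs 4.
    demo_grid = np.ones((3, 3), dtype=int)
    demo_distance, demo_path = dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(demo_distance, demo_path)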
| 85
|
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight reaches the max limit (e.g. 15 kg) and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
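
    # Worked example as a sketch (not in the original script): with profits
    # [60, 100, 120], weights [10, 20, 30] and capacity 50, the greedy-by-ratio
    # strategy takes the first two items whole and 20/30 of the third, for 240.0.
    assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240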
| 357
| 0
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class UpperCamelCase ( lowercase_ ):
lowercase = 'bart'
lowercase = ['past_key_values']
lowercase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self ,__UpperCamelCase=5_0265 ,__UpperCamelCase=1024 ,__UpperCamelCase=12 ,__UpperCamelCase=4096 ,__UpperCamelCase=16 ,__UpperCamelCase=12 ,__UpperCamelCase=4096 ,__UpperCamelCase=16 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase="gelu" ,__UpperCamelCase=1024 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.02 ,__UpperCamelCase=0.0 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=3 ,__UpperCamelCase=1 ,__UpperCamelCase=0 ,__UpperCamelCase=2 ,__UpperCamelCase=True ,__UpperCamelCase=2 ,__UpperCamelCase=2 ,**__UpperCamelCase ,) -> Tuple:
'''simple docstring'''
lowercase_ : List[str] = vocab_size
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : Optional[int] = d_model
lowercase_ : Tuple = encoder_ffn_dim
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : List[str] = encoder_attention_heads
lowercase_ : Optional[Any] = decoder_ffn_dim
lowercase_ : str = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Union[str, Any] = dropout
lowercase_ : Any = attention_dropout
lowercase_ : Any = activation_dropout
lowercase_ : Dict = activation_function
lowercase_ : List[str] = init_std
lowercase_ : Tuple = encoder_layerdrop
lowercase_ : str = decoder_layerdrop
lowercase_ : Any = classifier_dropout
lowercase_ : int = use_cache
lowercase_ : List[Any] = encoder_layers
lowercase_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCamelCase ,pad_token_id=__UpperCamelCase ,bos_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,is_encoder_decoder=__UpperCamelCase ,decoder_start_token_id=__UpperCamelCase ,forced_eos_token_id=__UpperCamelCase ,**__UpperCamelCase ,)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' ,__UpperCamelCase ):
lowercase_ : Optional[Any] = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
class UpperCamelCase ( lowercase_ ):
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase_ : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowercase_ : Optional[Any] = {0: 'batch'}
lowercase_ : Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase_ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
lowercase_ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase_ : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
lowercase_ : Union[str, Any] = self.num_layers
for i in range(__UpperCamelCase ):
lowercase_ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
lowercase_ : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
lowercase_ : List[str] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase_ : Union[str, Any] = super().outputs
else:
lowercase_ : Tuple = super(__UpperCamelCase ,self ).outputs
if self.use_past:
lowercase_ : Optional[Any] = self.num_layers
for i in range(__UpperCamelCase ):
lowercase_ : Any = {0: 'batch', 2: 'past_sequence + sequence'}
lowercase_ : List[str] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = False ,__UpperCamelCase = None ,) -> Mapping[str, Any]:
'''simple docstring'''
lowercase_ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# Generate decoder inputs
lowercase_ : Optional[Any] = seq_length if not self.use_past else 1
lowercase_ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Optional[int] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowercase_ : Dict = dict(**__UpperCamelCase ,**__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowercase_ : Optional[int] = common_inputs['input_ids'].shape
lowercase_ : str = common_inputs['decoder_input_ids'].shape[1]
lowercase_ : List[str] = self.num_attention_heads
lowercase_ : Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase_ : Any = decoder_seq_length + 3
lowercase_ : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase_ : List[str] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__UpperCamelCase ,__UpperCamelCase )] ,dim=1 )
lowercase_ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase_ : str = self.num_layers
lowercase_ : Optional[Any] = min(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : Any = max(__UpperCamelCase ,__UpperCamelCase ) - min_num_layers
lowercase_ : Union[str, Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
lowercase_ : List[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__UpperCamelCase ,__UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = False ,__UpperCamelCase = None ,) -> Mapping[str, Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
lowercase_ : Dict = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
lowercase_ : Union[str, Any] = seqlen + 2
lowercase_ : Tuple = self.num_layers
lowercase_ : Union[str, Any] = self.num_attention_heads
lowercase_ : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase_ : List[Any] = common_inputs['attention_mask'].dtype
lowercase_ : Optional[Any] = torch.cat(
[common_inputs['attention_mask'], torch.ones(__UpperCamelCase ,__UpperCamelCase ,dtype=__UpperCamelCase )] ,dim=1 )
lowercase_ : Any = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = False ,__UpperCamelCase = None ,) -> Mapping[str, Any]:
'''simple docstring'''
lowercase_ : List[Any] = compute_effective_axis_dimension(
__UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ : int = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
lowercase_ : Optional[Any] = compute_effective_axis_dimension(
__UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase_ : Tuple = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase_ : int = dict(tokenizer(__UpperCamelCase ,return_tensors=__UpperCamelCase ) )
return common_inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = False ,__UpperCamelCase = None ,) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase_ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase ,batch_size=__UpperCamelCase ,seq_length=__UpperCamelCase ,is_pair=__UpperCamelCase ,framework=__UpperCamelCase )
elif self.task == "causal-lm":
lowercase_ : List[Any] = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase ,batch_size=__UpperCamelCase ,seq_length=__UpperCamelCase ,is_pair=__UpperCamelCase ,framework=__UpperCamelCase )
else:
lowercase_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCamelCase ,batch_size=__UpperCamelCase ,seq_length=__UpperCamelCase ,is_pair=__UpperCamelCase ,framework=__UpperCamelCase )
return common_inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase_ : Dict = super()._flatten_past_key_values_(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
else:
lowercase_ : Tuple = super(__UpperCamelCase ,self )._flatten_past_key_values_(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
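# Note (illustrative, not from the original file): an ONNX config like the one above is
# what the export entry point consumes; under that assumption an export would look
# roughly like `python -m transformers.onnx --model=facebook/bart-large <output_dir>`,
# with the chosen task (e.g. seq2seq-lm) selecting which dummy inputs are generated.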
| 716
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCamelCase ( unittest.TestCase ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=4 ,) -> str:
'''simple docstring'''
lowercase_ : str = parent
lowercase_ : List[str] = batch_size
lowercase_ : Optional[Any] = seq_length
lowercase_ : str = is_training
lowercase_ : Any = use_attention_mask
lowercase_ : int = use_token_type_ids
lowercase_ : Union[str, Any] = use_labels
lowercase_ : int = vocab_size
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : str = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Any = num_choices
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase_ : List[Any] = None
if self.use_attention_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Optional[Any] = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=__UpperCamelCase ,)
return config, input_ids, attention_mask
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : str = config_and_inputs
lowercase_ : List[str] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = FlaxDistilBertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase_ : Optional[int] = model_class_name.from_pretrained('distilbert-base-uncased' )
lowercase_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase_ : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase_ : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase_ : Any = model(__UpperCamelCase ,attention_mask=__UpperCamelCase )[0]
lowercase_ : Tuple = (1, 11, 768)
self.assertEqual(output.shape ,__UpperCamelCase )
lowercase_ : Union[str, Any] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,__UpperCamelCase ,atol=1e-4 ) )
| 477
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( ):
__magic_name__ : Optional[int] ="""https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
__magic_name__ : Optional[int] =Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert("""RGB""" )
return image
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : List[Any] =[]
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =dct.pop(lowerCamelCase )
__magic_name__ : Optional[int] =val
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__magic_name__ : List[Any] =state_dict.pop(F"visual_encoder.blocks.{i}.attn.q_bias" )
__magic_name__ : List[Any] =state_dict.pop(F"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
__magic_name__ : List[Any] =torch.cat((q_bias, torch.zeros_like(lowerCamelCase , requires_grad=lowerCamelCase ), v_bias) )
__magic_name__ : int =qkv_bias
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Dict =364 if """coco""" in model_name else 224
__magic_name__ : Dict =BlipaVisionConfig(image_size=lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__magic_name__ : List[Any] =OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
__magic_name__ : int =OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
__magic_name__ : Dict =TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__magic_name__ : Union[str, Any] =TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
__magic_name__ : Dict =BlipaConfig(vision_config=lowerCamelCase , text_config=lowerCamelCase )
return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ):
__magic_name__ : List[Any] =(
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
__magic_name__ : List[str] =tokenizer("""\n""" , add_special_tokens=lowerCamelCase ).input_ids[0]
__magic_name__ , __magic_name__ : Dict =get_blipa_config(lowerCamelCase , eos_token_id=lowerCamelCase )
__magic_name__ : Union[str, Any] =BlipaForConditionalGeneration(lowerCamelCase ).eval()
__magic_name__ : List[Any] ={
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
__magic_name__ , __magic_name__ : int =model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__magic_name__ : List[str] ="""cuda""" if torch.cuda.is_available() else """cpu"""
__magic_name__ , __magic_name__ , __magic_name__ : str =load_model_and_preprocess(
name=lowerCamelCase , model_type=lowerCamelCase , is_eval=lowerCamelCase , device=lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
__magic_name__ : Tuple =original_model.state_dict()
__magic_name__ : str =create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__magic_name__ : Optional[Any] =state_dict.pop(lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
__magic_name__ : Tuple =key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
__magic_name__ : str =key.replace("""self""" , """attention""" )
if "opt_proj" in key:
__magic_name__ : Optional[int] =key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
__magic_name__ : Tuple =key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
__magic_name__ : Union[str, Any] =key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
__magic_name__ : List[str] =key.replace("""t5""" , """language""" )
__magic_name__ : List[str] =val
# read in qv biases
read_in_q_v_bias(lowerCamelCase , lowerCamelCase )
__magic_name__ , __magic_name__ : int =hf_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert len(lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__magic_name__ : int =load_demo_image()
__magic_name__ : Tuple =vis_processors["""eval"""](lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
__magic_name__ : List[Any] =tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
# create processor
__magic_name__ : Union[str, Any] =BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowerCamelCase , image_std=lowerCamelCase )
__magic_name__ : Optional[int] =BlipaProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
__magic_name__ : Optional[Any] =processor(images=lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase , lowerCamelCase )
original_model.to(lowerCamelCase )
hf_model.to(lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
__magic_name__ : List[Any] =original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
__magic_name__ : List[str] =hf_model(lowerCamelCase , lowerCamelCase ).logits
else:
__magic_name__ : Union[str, Any] =original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
__magic_name__ : Any =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__magic_name__ : List[str] =hf_model(lowerCamelCase , lowerCamelCase , labels=lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__magic_name__ : Tuple =torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__magic_name__ : Union[str, Any] =torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowerCamelCase )
else:
# cast to same type
__magic_name__ : Any =logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase ) , lowerCamelCase , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
__magic_name__ : Dict =""""""
__magic_name__ : List[Any] =tokenizer(lowerCamelCase , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
__magic_name__ : List[str] =original_model.generate({"""image""": original_pixel_values} )
__magic_name__ : int =hf_model.generate(
lowerCamelCase , lowerCamelCase , do_sample=lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , lowerCamelCase )
__magic_name__ : List[Any] =input_ids.shape[1]
__magic_name__ : int =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase )
__magic_name__ : int =[text.strip() for text in output_text]
print("""HF generation:""" , lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if push_to_hub:
processor.push_to_hub(F"nielsr/{model_name}" )
hf_model.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
UpperCAmelCase_ : Dict = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 21
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 21
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowercase__ :
'''simple docstring'''
_snake_case = BlenderbotConfig
_snake_case = {}
_snake_case = """gelu"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=9_9 , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=2_0 , lowerCamelCase__=2 , lowerCamelCase__=1 , lowerCamelCase__=0 , ):
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase = prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase = TFBlenderbotModel(config=lowerCAmelCase__ ).get_decoder()
UpperCamelCase = inputs_dict["input_ids"]
UpperCamelCase = input_ids[:1, :]
UpperCamelCase = inputs_dict["attention_mask"][:1, :]
UpperCamelCase = inputs_dict["head_mask"]
UpperCamelCase = 1
# first forward pass
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
UpperCamelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1e-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase__ ( a__, a__, unittest.TestCase ):
'''simple docstring'''
_snake_case = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_snake_case = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_snake_case = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_snake_case = True
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = TFBlenderbotModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
@require_tokenizers
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ["""My friends are cool but they eat too many carbs."""]
_snake_case = """facebook/blenderbot-400M-distill"""
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCamelCase = self.model.generate(
model_inputs.input_ids , )
UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase__ )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 709
|
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ', )
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format
    return ''.join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
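
    # Worked example as a sketch (not part of the original script): for the infix
    # expression "a+b*c", the conversion above prints its trace tables and yields the
    # prefix form "+a*bc".
    assert infix_2_prefix("a+b*c") == "+a*bc"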
| 350
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: List[Any] = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: List[str] = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: str = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 152
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f'{max_subarray_sum(nums) = }')
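
    # Additional illustration (a sketch, not in the original): when every element is
    # negative the default behaviour returns the largest single element, while
    # allow_empty_subarrays=True lets the empty subarray win with a sum of 0.
    assert max_subarray_sum([-3, -1, -2]) == -1
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0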
| 152
| 1
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
SCREAMING_SNAKE_CASE_: List[Any] =True
except ImportError:
SCREAMING_SNAKE_CASE_: Tuple =False
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCAmelCase_ ( snake_case_ : Namespace ) -> Tuple:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __A ( UpperCamelCase__ ):
@staticmethod
def _lowercase (__a : ArgumentParser ):
UpperCAmelCase_ = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=__a , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=__a , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=__a )
def __init__(self : int , __a : bool , __a : str , __a : int=None , *__a : Union[str, Any] ):
UpperCAmelCase_ = testing
UpperCAmelCase_ = testing_file
UpperCAmelCase_ = path
def _lowercase (self : Optional[Any] ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(__a ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
UpperCAmelCase_ = (
Path(__a ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase_ = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__a ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(__a )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__a , extra_context=__a , )
UpperCAmelCase_ = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
UpperCAmelCase_ = json.load(__a )
UpperCAmelCase_ = configuration["lowercase_modelname"]
UpperCAmelCase_ = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f"""{directory}/configuration.json""" )
UpperCAmelCase_ = "PyTorch" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "TensorFlow" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = "Flax" in generate_tensorflow_pytorch_and_flax
UpperCAmelCase_ = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__a , exist_ok=__a )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__a )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__a : Optional[Any] ):
with open(__a , "r" ) as f:
UpperCAmelCase_ = f.readlines()
with open(__a , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__a )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__a : str , __a : str , __a : List[str] ):
# Create temp file
UpperCAmelCase_ , UpperCAmelCase_ = mkstemp()
UpperCAmelCase_ = False
with fdopen(__a , "w" ) as new_file:
with open(__a ) as old_file:
for line in old_file:
new_file.write(__a )
if line_to_copy_below in line:
UpperCAmelCase_ = True
for line_to_copy in lines_to_copy:
new_file.write(__a )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__a , __a )
# Remove original file
remove(__a )
# Move new file
move(__a , __a )
def skip_units(__a : List[str] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__a : Optional[Any] ):
with open(__a ) as datafile:
UpperCAmelCase_ = []
UpperCAmelCase_ = False
UpperCAmelCase_ = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(__a )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase_ = line.split("\"" )[1]
UpperCAmelCase_ = skip_units(__a )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__a , __a , __a )
UpperCAmelCase_ = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase_ = []
elif "##" not in line:
lines_to_copy.append(__a )
remove(__a )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(__a )
| 719
|
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols('ct x y z')


def beta(velocity: float) -> float:
    """Return v/c after validating the speed."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost to a four-vector; defaults to the symbolic (ct, x, y, z)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
SCREAMING_SNAKE_CASE_: Optional[Any] =transform(29_97_92_45)
print('Example of four vector: ')
print(f"ct' = {four_vector[0]}")
print(f"x' = {four_vector[1]}")
print(f"y' = {four_vector[2]}")
print(f"z' = {four_vector[3]}")
# Substitute symbols with numerical values
SCREAMING_SNAKE_CASE_: Any ={ct: c, x: 1, y: 1, z: 1}
SCREAMING_SNAKE_CASE_: List[Any] =[four_vector[i].subs(sub_dict) for i in range(4)]
print(f"\n{numerical_vector}")
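    # Extra illustrative example (an assumption for demonstration, not part of the
    # original script): boost a purely numerical event at half the speed of light.
    numeric_event = np.array([1.0, 2.0, 0.0, 0.0])  # [t, x, y, z] with t in seconds
    print(f"\nNumeric boost at c/2: {transform(c / 2, numeric_event)}")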
| 415
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
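# Note: with the lazy structure above, ``from transformers.models.pegasus_x import PegasusXModel``
# only triggers the import of ``modeling_pegasus_x`` (and its torch dependency) on first access.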
| 223
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a dataset file hosted on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
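# Illustrative usage (the repository and file names below are assumptions, not from this module):
# url = hf_hub_url("lhoestq/demo1", "data/train.csv", revision="main")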
| 290
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 678
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Bernstein basis polynomials evaluated at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve together with its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
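# Worked example (illustrative): for a degree-1 curve through (1, 2) and (3, 5),
# basis_function(0.5) returns [0.5, 0.5] and bezier_curve_function(0.5) returns (2.0, 3.5).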
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 678
| 1
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
"""simple docstring"""
def __init__( self : List[str] , *lowerCamelCase : str , **lowerCamelCase : Union[str, Any] ) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
super().__init__(*lowerCamelCase , **lowerCamelCase )
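# Migration sketch (illustrative; the checkpoint name is an assumption, not from this file):
# new code should instantiate the replacement class directly, e.g.
# DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")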
| 81
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
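    # Illustrative check: rec_insertion_sort([4, 2, 3], 3) sorts the list in place to [2, 3, 4].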
| 377
| 0
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
__A : Optional[int] = parser.parse_args()
__A : Tuple = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 714
|
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any]=0 ):
'''simple docstring'''
# Format the message.
if name is None:
snake_case_ : Tuple = None
else:
snake_case_ : Optional[Any] = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
snake_case_ : Optional[Any] = fmt.format(lowerCamelCase_ )
# Print and recurse (if needed).
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if msg is not None:
print(lowerCamelCase_ )
for k in val.keys():
recursive_print(lowerCamelCase_ , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase_ , torch.Tensor ):
print(lowerCamelCase_ , """:""" , val.size() )
else:
print(lowerCamelCase_ , """:""" , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :int ):
'''simple docstring'''
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
snake_case_ : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ : List[Any] = param.view(*lowerCamelCase_ )
snake_case_ : Tuple = param.transpose(0 , 2 )
snake_case_ : List[str] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ : Tuple = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ : int = param.view(*lowerCamelCase_ )
snake_case_ : str = param.transpose(0 , 1 ).contiguous()
snake_case_ : int = param.view(*lowerCamelCase_ )
return param
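# Shape sketch (illustrative numbers, not taken from a real checkpoint): with num_heads=16,
# hidden_size=64 and num_splits=3, a checkpoint-version>=2.0 QKV weight of shape
# [16 * 3 * 64, D] is viewed as [16, 3, 64, D], transposed to [3, 16, 64, D] and
# flattened back to [3 * 16 * 64, D], so the query/key/value splits become the leading blocks.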
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Tuple ):
'''simple docstring'''
# The converted output model.
snake_case_ : Tuple = {}
# old versions did not store training args
snake_case_ : Optional[Any] = input_state_dict.get("""args""" , lowerCamelCase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ : Optional[int] = ds_args.padded_vocab_size
snake_case_ : str = ds_args.max_position_embeddings
snake_case_ : Tuple = ds_args.hidden_size
snake_case_ : List[str] = ds_args.num_layers
snake_case_ : Union[str, Any] = ds_args.num_attention_heads
snake_case_ : Tuple = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ : int = config.n_head
# The hidden_size per head.
snake_case_ : Any = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ : Tuple = input_state_dict["""checkpoint_version"""]
else:
snake_case_ : Dict = 0.0
# The model.
snake_case_ : Optional[Any] = input_state_dict["""model"""]
# The language model.
snake_case_ : Optional[Any] = model["""language_model"""]
# The embeddings.
snake_case_ : int = lm["""embedding"""]
# The word embeddings.
snake_case_ : Any = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ : Any = word_embeddings[: config.vocab_size, :]
snake_case_ : Union[str, Any] = word_embeddings
# The position embeddings.
snake_case_ : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ : List[str] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
snake_case_ : List[str] = pos_embeddings
# The transformer.
snake_case_ : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ : Union[str, Any] = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
snake_case_ : Tuple = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ : List[Any] = layer_re.match(lowerCamelCase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ : Any = int(m.group(1 ) )
# The name of the operation.
snake_case_ : Any = m.group(2 )
# Is it a weight or a bias?
snake_case_ : List[str] = m.group(3 )
# The name of the layer.
snake_case_ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
snake_case_ : str = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
snake_case_ : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ : List[str] = torch.tensor(-1E4 , dtype=torch.floataa )
snake_case_ : Optional[Any] = masked_bias
snake_case_ : Optional[Any] = fix_query_key_value_ordering(lowerCamelCase_ , lowerCamelCase_ , 3 , lowerCamelCase_ , lowerCamelCase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ : Any = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ : int = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ : Dict = fix_query_key_value_ordering(lowerCamelCase_ , lowerCamelCase_ , 3 , lowerCamelCase_ , lowerCamelCase_ )
# Store. No change of shape.
snake_case_ : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ : str = megatron_to_transformers[op_name]
snake_case_ : Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ : str = megatron_to_transformers[op_name]
snake_case_ : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ : str = transformer["""final_layernorm.weight"""]
snake_case_ : int = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
snake_case_ : Optional[int] = word_embeddings
# It should be done!
return output_state_dict
def UpperCAmelCase ( ):
'''simple docstring'''
# Create the argument parser.
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=lowerCamelCase_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=lowerCamelCase_ , help="""An optional config json file describing the pre-trained model.""" , )
snake_case_ : Tuple = parser.parse_args()
# Extract the basename.
snake_case_ : List[str] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
snake_case_ : Dict = torch.load(lowerCamelCase_ , map_location="""cpu""" )
else:
snake_case_ : Optional[Any] = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
snake_case_ : Optional[int] = input_state_dict.get("""args""" , lowerCamelCase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ : Tuple = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ : int = """gelu_new"""
else:
snake_case_ : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ : int = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ : int = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowerCamelCase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=lowerCamelCase_ , summary_activation=lowerCamelCase_ , summary_proj_to_labels=lowerCamelCase_ , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
snake_case_ : int = GPTaConfig.from_json_file(args.config_file )
snake_case_ : Optional[int] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
snake_case_ : Optional[Any] = convert_megatron_checkpoint(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase_ , lowerCamelCase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ : List[str] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
snake_case_ : List[str] = """gpt2"""
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
snake_case_ : int = type(lowerCamelCase_ ).__name__
snake_case_ : Union[str, Any] = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(lowerCamelCase_ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase_ )
# Store the state_dict to file.
snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 267
| 0
|
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
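    # Illustrative check: quick_sort_random([3, 1, 2], 0, 3) leaves the list sorted as [1, 2, 3].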
| 592
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ''''''
a__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(self , **__lowerCAmelCase )
__magic_name__ :List[Any] = repo_info
__magic_name__ :Dict = token
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
if self.dir_cache is None:
__magic_name__ :Any = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__magic_name__ :Optional[int] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowerCAmelCase ): {'''name''': str(__lowerCAmelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = "rb" , **__lowerCAmelCase , ):
"""simple docstring"""
if not isinstance(self.repo_info , __lowerCAmelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
__magic_name__ :Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def A ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :str = self._strip_protocol(__lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
"""simple docstring"""
self._get_dirs()
__magic_name__ :Union[str, Any] = PurePosixPath(path.strip('''/''' ) )
__magic_name__ :Dict = {}
for p, f in self.dir_cache.items():
__magic_name__ :int = PurePosixPath(p.strip('''/''' ) )
__magic_name__ :Tuple = p.parent
if root == path:
__magic_name__ :Optional[Any] = f
__magic_name__ :List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 0
| 0
|
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the insertion position of val in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert val at the found position
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(binary_insertion_sort(unsorted))
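    # Illustrative check: binary_insertion_sort([5, 2, 4, 1]) returns [1, 2, 4, 5].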
| 306
|
import os
def solution(filename: str = "matrix.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(F"""{solution() = }""")
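# Worked mini-example (illustrative): for the grid [[1, 3], [2, 4]] the minimal
# top-left to bottom-right path (moving only right or down) is 1 -> 2 -> 4 with sum 7.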
| 306
| 1
|
"""simple docstring"""
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_all, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_all, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists and count the inversions between them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)


if __name__ == "__main__":
    main()
| 83
|
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase ( unittest.TestCase ):
def UpperCAmelCase (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = logging.get_logger()
# the current default level is logging.WARNING
lowerCAmelCase = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = logging.get_verbosity()
lowerCAmelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
lowerCAmelCase = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,'''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,msg + '''\n''' )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def UpperCAmelCase (self : Any ) -> str:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowerCAmelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
lowerCAmelCase = os.getenv('''TRANSFORMERS_VERBOSITY''' ,SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = logging.log_levels[env_level_str]
lowerCAmelCase = logging.get_verbosity()
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" ,)
# restore to the original level
lowerCAmelCase = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def UpperCAmelCase (self : List[Any] ) -> Tuple:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase = logging.logging.getLogger()
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' ,cl.out )
# no need to restore as nothing was changed
def UpperCAmelCase (self : Dict ) -> Any:
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
lowerCAmelCase = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,'''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(SCREAMING_SNAKE_CASE_ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE_ )
self.assertEqual(cl.out ,msg + '''\n''' )
def __magic_name__ ( ) -> List[Any]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 535
| 0
|
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # scheduler_output - scheduler_output is a zero tensor, so the pipeline
        # deterministically returns a tensor of ones with the sample's shape.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
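# Usage sketch (illustrative; the checkpoint below is an assumption, not from this file):
# from diffusers import DDPMScheduler, UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# scheduler = DDPMScheduler()
# pipeline = CustomPipeline(unet=unet, scheduler=scheduler)
# tensor = pipeline()  # a tensor of ones with the sample's shape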
| 84
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
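# Example of the preprocessing above (illustrative numbers): a 130x94 PIL image is
# resized to 128x64, scaled to [0, 1], reordered to NCHW and mapped to [-1, 1],
# giving a float32 tensor of shape (1, 3, 64, 128).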
class a_ ( a ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : Any , UpperCAmelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[int] = 100 , UpperCAmelCase__ : Optional[float] = 0.0 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
snake_case : Optional[int] = 1
elif isinstance(UpperCAmelCase__ , torch.Tensor ):
snake_case : Any = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}" )
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
snake_case : Optional[Any] = preprocess(UpperCAmelCase__ )
snake_case , snake_case : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
snake_case : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
snake_case : str = next(self.unet.parameters() ).dtype
snake_case : Dict = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
snake_case : Any = image.to(device=self.device , dtype=UpperCAmelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
snake_case : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
snake_case : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : Optional[Any] = {}
if accepts_eta:
snake_case : Dict = eta
for t in self.progress_bar(UpperCAmelCase__ ):
# concat latents and low resolution image in the channel dimension.
snake_case : Optional[int] = torch.cat([latents, image] , dim=1 )
snake_case : str = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
snake_case : int = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case : Any = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# decode the image latents with the VQVAE
snake_case : Optional[int] = self.vqvae.decode(UpperCAmelCase__ ).sample
snake_case : int = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
snake_case : Dict = image / 2 + 0.5
snake_case : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Any = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84
| 1
|
"""simple docstring"""
import requests
A__ : Tuple = '''''' # <-- Put your OpenWeatherMap appid here!
A__ : Optional[Any] = '''https://api.openweathermap.org/data/2.5/'''
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()
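# Illustrative call (requires a valid OpenWeatherMap APPID): current_weather("Amsterdam")
# returns the raw JSON payload from the /weather endpoint as a dict.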
if __name__ == "__main__":
from pprint import pprint
while True:
A__ : Optional[Any] = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 153
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class A_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowercase = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
lowercase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
lowercase = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
lowercase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase = model(snake_case__ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowercase = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
lowercase = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
lowercase = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
lowercase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase = model(snake_case__ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case__ , atol=1E-3 ) )
| 428
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ["input_features", "attention_mask"]
def __init__( self : int , lowerCamelCase_ : List[str]=8_0 , lowerCamelCase_ : Tuple=1_6_0_0_0 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : List[Any]=1_0 , lowerCamelCase_ : List[str]=2_5 , lowerCamelCase_ : List[Any]="hamming_window" , lowerCamelCase_ : Tuple=3_2_7_6_8.0 , lowerCamelCase_ : int=0.97 , lowerCamelCase_ : Optional[int]=1.0 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ )
_lowercase : Dict = feature_size
_lowercase : Dict = sampling_rate
_lowercase : Tuple = padding_value
_lowercase : int = hop_length
_lowercase : Any = win_length
_lowercase : Union[str, Any] = frame_signal_scale
_lowercase : Tuple = preemphasis_coeff
_lowercase : Tuple = mel_floor
_lowercase : Tuple = normalize_means
_lowercase : List[Any] = normalize_vars
_lowercase : List[str] = win_function
_lowercase : int = return_attention_mask
_lowercase : Optional[Any] = win_length * sampling_rate // 1_0_0_0
_lowercase : Tuple = hop_length * sampling_rate // 1_0_0_0
_lowercase : str = optimal_fft_length(self.sample_size )
_lowercase : Dict = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : np.array ):
"""simple docstring"""
if self.win_function == "hamming_window":
_lowercase : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ )
else:
_lowercase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function )
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_lowercase : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=lowerCamelCase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowerCamelCase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowerCamelCase_ , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple ):
"""simple docstring"""
if self.normalize_means:
_lowercase : Optional[int] = x[:input_length].mean(axis=0 )
_lowercase : int = np.subtract(lowerCamelCase_ , lowerCamelCase_ )
if self.normalize_vars:
_lowercase : int = x[:input_length].std(axis=0 )
_lowercase : Optional[Any] = np.divide(lowerCamelCase_ , lowerCamelCase_ )
if input_length < x.shape[0]:
_lowercase : Dict = padding_value
# make sure array is in float32
_lowercase : Tuple = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[np.ndarray] , lowerCamelCase_ : Optional[np.ndarray] = None ):
"""simple docstring"""
_lowercase : Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowerCamelCase_ , lowerCamelCase_ , self.padding_value ) for x, n in zip(lowerCamelCase_ , lowerCamelCase_ )]
def __call__( self : Dict , lowerCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : Optional[int] = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : str = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : Tuple = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : str = [raw_speech]
# extract fbank features
_lowercase : Optional[Any] = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowercase : Optional[int] = BatchFeature({'input_features': features} )
_lowercase : Tuple = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
# make sure list is in array format
_lowercase : Dict = padded_inputs.get('input_features' )
if isinstance(input_features[0] , lowerCamelCase_ ):
_lowercase : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_features]
_lowercase : List[Any] = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_lowercase : Union[str, Any] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowercase : int = (
np.array(lowerCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowercase : List[Any] = self.normalize(
padded_inputs['input_features'] , attention_mask=lowerCamelCase_ )
if return_tensors is not None:
_lowercase : Union[str, Any] = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
| 716
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=32768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract log-mel filterbank (MFSC) features for a single, unbatched waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        """Apply optional mean/variance normalization to the valid frames of one feature matrix."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs):
        """Featurize one waveform or a batch of waveforms and pad the resulting features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
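# Hedged usage sketch (not part of the original module): it assumes a 16 kHz mono waveform and the
# default constructor arguments; the half-second random signal below is purely illustrative.
if __name__ == "__main__":
    _waveform = np.random.randn(8000).astype(np.float32)  # 0.5 s of fake audio at 16 kHz
    _extractor = MCTCTFeatureExtractor()
    _batch = _extractor(_waveform, sampling_rate=16000, padding=True, return_tensors="np")
    print(_batch["input_features"][0].shape)  # (num_frames, feature_size)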
| 283
| 0
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows
        return written
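# Hedged usage sketch (not part of the original module): write a small in-memory Dataset to an
# in-memory SQLite database with the writer defined above. The table name "demo" and the sqlite3
# connection are illustrative assumptions, not values taken from the original code.
if __name__ == "__main__":
    import sqlite3
    con = sqlite3.connect(":memory:")
    ds = Dataset.from_dict({"idx": [0, 1, 2], "text": ["a", "b", "c"]})
    written = SqlDatasetWriter(ds, "demo", con, batch_size=2).write()
    print(f"wrote {written} rows to table 'demo'")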
| 661
|
"""simple docstring"""
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration and mobility (pass the unknown one
    as 0), return the name and value of the third quantity."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
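# Worked example (illustrative numbers only): with conductivity = 25 and mobility = 1200 known and
# the electron concentration passed as 0, the function solves sigma = n * q * mu for n:
# carrier_concentration(conductivity=25, electron_conc=0, mobility=1200)
# -> ("electron_conc", 25 / (1200 * ELECTRON_CHARGE)) ≈ ("electron_conc", 1.3e17)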
| 661
| 1
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Compute the start time, end time and duration (in minutes) of one workflow job."""
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
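# Worked example (hypothetical job payload, not taken from the original script): a job such as
#   {"name": "run_tests", "started_at": "2023-01-01T00:00:00Z", "completed_at": "2023-01-01T00:12:30Z"}
# yields {"started_at": ..., "completed_at": ..., "duration": 12}, since 12.5 minutes rounds to 12
# under Python's round-half-to-even rule.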
def get_job_time(workflow_run_id, token=None):
    """Fetch all jobs of a workflow run from the GitHub API and return their timing info by name."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 532
|
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small undirected demo graphs, selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return all bridge edges of an undirected graph as (smaller, larger) vertex pairs."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
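# Hedged worked example: for demo graph 0 above, compute_bridges(get_demo_graph(0)) should report
# the edges (2, 3), (3, 4) and (2, 5); the triangle 0-1-2 and the cycle 5-6-7-8-5 contain no
# bridges, and the exact ordering of the returned pairs follows DFS discovery order.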
| 532
| 1
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the worldometers page and return the headline COVID-19 counters."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 484
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
'''simple docstring'''
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
'''simple docstring'''
_A = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_A = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
_A = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
_A = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
_A = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
_A = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
_A = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
_A = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
_A = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
_A = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
_A = key.replace('''image_encoder.module''' , '''flava.image_model''' )
_A = key.replace('''text_encoder.module''' , '''flava.text_model''' )
_A = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
_A = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
_A = key.replace('''text_projection''' , '''flava.text_projection''' )
_A = key.replace('''image_projection''' , '''flava.image_projection''' )
_A = value.float()
for key, value in codebook_state_dict.items():
_A = value
return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
'''simple docstring'''
if config_path is not None:
_A = FlavaConfig.from_pretrained(_lowercase )
else:
_A = FlavaConfig()
_A = FlavaForPreTraining(_lowercase ).eval()
_A = convert_dalle_checkpoint(_lowercase , _lowercase , save_checkpoint=_lowercase )
if os.path.exists(_lowercase ):
_A = torch.load(_lowercase , map_location='''cpu''' )
else:
_A = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )
_A = upgrade_state_dict(_lowercase , _lowercase )
hf_model.load_state_dict(_lowercase )
_A = hf_model.state_dict()
_A = count_parameters(_lowercase )
_A = count_parameters(_lowercase ) + count_parameters(_lowercase )
assert torch.allclose(_lowercase , _lowercase , atol=1e-3 )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 484
| 1
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark a method as the handler for a single key."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark a method as the handler for several keys at once."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch it to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char  # attribute name reconstructed; the obfuscated original only shows an assignment here
            return handler(cls)
        else:
            return None


def register(cls):
    """Rebuild a class through the KeyHandler metaclass so its marked methods get registered."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 41
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
'''simple docstring'''
assert masked_input.count("<mask>" ) == 1
a_ =torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1
a_ =model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple
a_ =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
a_ =logits[0, masked_index, :]
a_ =logits.softmax(dim=0 )
a_ , a_ =prob.topk(k=lowercase__ , dim=0 )
a_ =" ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] )
a_ =tokenizer.mask_token
a_ =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
a_ =predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(lowercase__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(lowercase__ ) , lowercase__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowercase__ , lowercase__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 41
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : List[Any] = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
def __init__( self , a__ = None , a__ = 2_5_6 , a__ = 2_5_6 , a__ = 2_5_6 , a__ = 1_0_2_4 , a__ = "relu" , a__ = 6 , a__ = 1_0 , a__ = 8 , a__ = 0.0 , a__ = 2_0_4_8 , a__ = False , a__ = False , a__ = 4 , a__ = 2_5_5 , a__ = 1_0_0 , a__ = 0.1 , a__ = 2.0 , a__ = 5.0 , a__ = 5.0 , a__ = 1_2_5_4_4 , a__ = 3.0 , a__ = 0.75 , a__ = 0.02 , a__ = 1.0 , a__ = True , a__ = [4, 8, 1_6, 3_2] , a__ = None , **a__ , ) -> Optional[int]:
'''simple docstring'''
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
A_ = CONFIG_MAPPING['''swin'''](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=a__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(a__ , a__ ):
A_ = backbone_config.pop('''model_type''' )
A_ = CONFIG_MAPPING[backbone_model_type]
A_ = config_class.from_dict(a__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
F"Supported model types: {','.join(self.backbones_supported )}" )
A_ = backbone_config
A_ = feature_size
A_ = mask_feature_size
A_ = hidden_dim
A_ = encoder_feedforward_dim
A_ = activation_function
A_ = encoder_layers
A_ = decoder_layers
A_ = num_attention_heads
A_ = dropout
A_ = dim_feedforward
A_ = pre_norm
A_ = enforce_input_projection
A_ = common_stride
A_ = ignore_value
A_ = num_queries
A_ = no_object_weight
A_ = class_weight
A_ = mask_weight
A_ = dice_weight
A_ = train_num_points
A_ = oversample_ratio
A_ = importance_sample_ratio
A_ = init_std
A_ = init_xavier_std
A_ = use_auxiliary_loss
A_ = feature_strides
A_ = output_auxiliary_logits
A_ = decoder_layers
super().__init__(**a__ )
@classmethod
def lowerCAmelCase_ ( cls , a__ , **a__ ) -> Optional[Any]:
'''simple docstring'''
return cls(
backbone_config=a__ , **a__ , )
def lowerCAmelCase_ ( self ) -> Dict[str, any]:
'''simple docstring'''
A_ = copy.deepcopy(self.__dict__ )
A_ = self.backbone_config.to_dict()
A_ = self.__class__.model_type
return output
| 141
|
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod by recursive squaring, using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
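# Worked example: by Fermat's little theorem, b**(p - 2) is the modular inverse of b mod prime p,
# so (a * binary_exponentiation(b, p - 2, p)) % p computes (a / b) mod p. With a = 10, b = 5, p = 7:
# binary_exponentiation(5, 5, 7) == 3 (because 5 * 3 == 15 == 1 mod 7), and (10 * 3) % 7 == 2,
# which matches (10 // 5) % 7 == 2.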
| 141
| 1
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
def __init__( self : int , *UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =eval_examples
__UpperCamelCase =post_process_function
__UpperCamelCase =quant_trainer_args
__UpperCamelCase =128 # default number of calibration samples
def UpperCAmelCase_ ( self : Any , UpperCamelCase__ : int=None ) -> Optional[Any]:
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
__UpperCamelCase =calib_dataset if calib_dataset is not None else self.calib_dataset
__UpperCamelCase =self._remove_unused_columns(UpperCamelCase__ , description='''Calibration''' )
return DataLoader(
UpperCamelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase__ , )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCamelCase__ : str=None ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.train_dataset if calib_dataset is None else calib_dataset
__UpperCamelCase =self.get_calib_dataloader(UpperCamelCase__ )
__UpperCamelCase =self.model
quant_trainer.configure_model(UpperCamelCase__ , self.quant_trainer_args , calib=UpperCamelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCamelCase__ )
logger.info('''***** Running calibration *****''' )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(UpperCamelCase__ ):
# Prediction step
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =self.prediction_step(UpperCamelCase__ , UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCamelCase__ , self.quant_trainer_args )
__UpperCamelCase =model
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str = "eval" ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCamelCase =self.get_eval_dataloader(UpperCamelCase__ )
__UpperCamelCase =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCamelCase =self.compute_metrics
__UpperCamelCase =None
__UpperCamelCase =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCamelCase =eval_loop(
UpperCamelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , )
finally:
__UpperCamelCase =compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__UpperCamelCase =self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions )
__UpperCamelCase =self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__UpperCamelCase =metrics.pop(UpperCamelCase__ )
self.log(UpperCamelCase__ )
else:
__UpperCamelCase ={}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__UpperCamelCase =self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ )
return metrics
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str = "test" ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =self.get_test_dataloader(UpperCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCamelCase =self.compute_metrics
__UpperCamelCase =None
__UpperCamelCase =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCamelCase =eval_loop(
UpperCamelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , )
finally:
__UpperCamelCase =compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCamelCase =self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions , '''predict''' )
__UpperCamelCase =self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__UpperCamelCase =metrics.pop(UpperCamelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : Any="./" ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =self.eval_dataset
__UpperCamelCase =self.get_eval_dataloader(UpperCamelCase__ )
__UpperCamelCase =next(iter(UpperCamelCase__ ) )
# saving device - to make it consistent
__UpperCamelCase =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__UpperCamelCase =tuple(v.to(UpperCamelCase__ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__UpperCamelCase =True
__UpperCamelCase =self.model.to(UpperCamelCase__ )
model.eval()
model.float()
__UpperCamelCase =model.module if hasattr(UpperCamelCase__ , '''module''' ) else model
quant_trainer.configure_model(UpperCamelCase__ , self.quant_trainer_args )
__UpperCamelCase =os.path.join(UpperCamelCase__ , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
__UpperCamelCase ={0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , export_params=UpperCamelCase__ , opset_version=13 , do_constant_folding=UpperCamelCase__ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=UpperCamelCase__ , )
logger.info('''onnx export finished''' )
| 719
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ():
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
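# Hedged usage sketch (not part of the original tests): the tiny model above maps a (batch, 3)
# float tensor to (batch, 5).
if __name__ == "__main__":
    _model = ModelForTest()
    _model.eval()  # eval mode so BatchNorm1d does not need running batch statistics for the demo
    print(_model(torch.randn(2, 3)).shape)  # torch.Size([2, 5])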
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Any ) -> Any:
'''simple docstring'''
__UpperCamelCase =[]
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : List[str] ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] )
def UpperCAmelCase_ ( self : str ) -> int:
'''simple docstring'''
__UpperCamelCase =[]
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(UpperCamelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCamelCase , __UpperCamelCase =mock_training_loop_function('''hello''' )
self.assertListEqual(UpperCamelCase__ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def UpperCAmelCase_ ( self : Tuple ) -> Any:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCamelCase__ : Tuple ):
pass
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Any ) -> Dict:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase__ : int ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCamelCase__ : Any ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(UpperCamelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
__UpperCamelCase =torch.cuda.memory_allocated()
__UpperCamelCase =ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , UpperCamelCase__ )
__UpperCamelCase =release_memory(UpperCamelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , UpperCamelCase__ )
| 296
| 0
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int = 8 , lowerCAmelCase__ : int = 1024 , lowerCAmelCase__ : Union[str, Any]="val" , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : int="summarization" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : Dict="" , **lowerCAmelCase__ : Union[str, Any] , ) -> Dict:
'''simple docstring'''
A = str(lowerCAmelCase__ )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=lowerCAmelCase__ )
A = Path(lowerCAmelCase__ )
A = save_dir.joinpath(F'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(lowerCAmelCase__ )
    model = AutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase__).cuda()
    if fpaa:
        model = model.half()
# determine if we need to increase num_beams
use_task_specific_params(lowerCAmelCase__ , lowerCAmelCase__ ) # update config with task specific params
A = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
A = num_return_sequences
A = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
A = tokenizer.model_max_length
if prefix is None:
A = prefix or getattr(model.config , 'prefix' , '' ) or ''
A = SeqaSeqDataset(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , max_target_length=1024 , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , prefix=lowerCAmelCase__ , **lowerCAmelCase__ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
A = ds.make_sortish_sampler(lowerCAmelCase__ , distributed=lowerCAmelCase__ , add_extra_examples=lowerCAmelCase__ , shuffle=lowerCAmelCase__ )
A = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=ds.collate_fn )
A = []
for batch in tqdm(lowerCAmelCase__ ):
A = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , **lowerCAmelCase__ , )
A = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
A = batch['ids']
if num_return_sequences > 1:
A = chunks(lowerCAmelCase__ , lowerCAmelCase__ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(lowerCAmelCase__ ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(lowerCAmelCase__ , lowerCAmelCase__ )
return results, sampler.num_replicas
def lowerCamelCase_ ( ) -> List[str]:
'''simple docstring'''
A = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=lowerCAmelCase__ , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=lowerCAmelCase__ , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=lowerCAmelCase__ , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument(
'--type_path' , type=lowerCAmelCase__ , default='test' , help='which subset to evaluate typically train/val/test' )
parser.add_argument('--task' , type=lowerCAmelCase__ , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=lowerCAmelCase__ , default=8 , required=lowerCAmelCase__ , help='batch size' )
parser.add_argument(
'--local_rank' , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=lowerCAmelCase__ , default=600 , required=lowerCAmelCase__ , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument('--tgt_lang' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
'--prefix' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , default=lowerCAmelCase__ , help='will be added to the begininng of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
A = time.time()
A , A = parser.parse_known_args()
A = parse_numeric_n_bool_cl_kwargs(lowerCAmelCase__ )
if generate_kwargs and args.local_rank <= 0:
print(F'''parsed the following generate kwargs: {generate_kwargs}''' )
A = Path(args.save_dir + '_tmp' )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) # this handles locking.
A = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
A = {}
if args.src_lang is not None:
A = args.src_lang
if args.tgt_lang is not None:
A = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=lowerCAmelCase__ )
A , A = eval_data_dir(
args.data_dir , lowerCAmelCase__ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , )
if args.local_rank <= 0:
A = Path(args.save_dir )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
A = gather_results_from_each_node(lowerCAmelCase__ , lowerCAmelCase__ , args.sync_timeout )
A = combine_partial_results(lowerCAmelCase__ )
if args.num_return_sequences > 1:
A = save_dir.joinpath('pseudolabel_results.json' )
print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(lowerCAmelCase__ , lowerCAmelCase__ )
return
A = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(lowerCAmelCase__ ) as f:
A = [x.rstrip() for x in f.readlines()][: len(lowerCAmelCase__ )]
# Calculate metrics, save metrics, and save _generations.txt
A = 'translation' in args.task
A = calculate_bleu if calc_bleu else calculate_rouge
A = 'bleu' if calc_bleu else 'rouge'
A = score_fn(lowerCAmelCase__ , lowerCAmelCase__ )
A = len(lowerCAmelCase__ )
A = time.time() - start_time
A = round(runtime / metrics['n_obs'] , 4 )
A = num_replicas
# TODO(@stas00): add whatever metadata to metrics
A = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' )
save_json(lowerCAmelCase__ , lowerCAmelCase__ , indent=lowerCAmelCase__ )
print(lowerCAmelCase__ )
write_txt_file(lowerCAmelCase__ , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(lowerCAmelCase__ , save_dir.joinpath(F'''{args.type_path}.target''' ) )
else:
shutil.rmtree(lowerCAmelCase__ )
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank prediction lists, sort by example id and return the predictions."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Wait until every rank has written its rank_*.json file, then load and return them all."""
    start_wait = time.time()
    logger.info('waiting for all nodes to finish')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json'))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes')
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 106
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Any:
A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__UpperCamelCase ) )
def __UpperCamelCase ( self : str ) -> List[Any]:
A = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__UpperCamelCase ) )
def __UpperCamelCase ( self : Any ) -> Tuple:
A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__UpperCamelCase ) )
def __UpperCamelCase ( self : Dict ) -> Any:
A = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(__UpperCamelCase ) )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
A = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__UpperCamelCase ) )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
A = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def __UpperCamelCase ( self : int ) -> Optional[int]:
# pass variant but use the non-variant filenames
A = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def __UpperCamelCase ( self : Any ) -> List[str]:
A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertFalse(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
A = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def __UpperCamelCase ( self : List[str] ) -> int:
# pass variant but use the non-variant filenames
A = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
A = 'fp16'
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
A = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
A = 'fp16'
self.assertFalse(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
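# Hedged note (inferred from the assertions above, not from the library's documentation):
# `is_safetensors_compatible` takes the list of repository filenames, plus an optional `variant`
# such as "fp16", and returns True when the torch `.bin` weights it inspects have `.safetensors`
# counterparts. A minimal standalone check might look like:
#
#     from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
#     print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin",
#                                      "unet/diffusion_pytorch_model.safetensors"]))  # True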
| 106
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class _lowerCAmelCase ( a__ ):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase=3 , lowercase=3 , lowercase=("DownEncoderBlock2D",) , lowercase=(64,) , lowercase=2 , lowercase=32 , lowercase="silu" , lowercase=True , ):
super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])
        # down
        output_channel = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase__ ):
A_ : Dict = output_channel
A_ : str = block_out_channels[i]
A_ : int = i == len(lowerCAmelCase__ ) - 1
A_ : List[Any] = get_down_block(
lowerCAmelCase__ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
self.down_blocks.append(lowerCAmelCase__ )
# mid
        self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# out
A_ : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase__ , eps=1E-6 )
A_ : Dict = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
A_ : List[str] = False
def _a (self , lowercase ):
A_ : Optional[Any] = x
A_ : Dict = self.conv_in(lowerCAmelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase ):
def custom_forward(*lowercase ):
return module(*lowerCAmelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
# middle
A_ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
for down_block in self.down_blocks:
A_ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ )
# middle
A_ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ : Optional[int] = down_block(lowerCAmelCase__ )
# middle
A_ : int = self.mid_block(lowerCAmelCase__ )
# post-process
A_ : Tuple = self.conv_norm_out(lowerCAmelCase__ )
A_ : int = self.conv_act(lowerCAmelCase__ )
A_ : str = self.conv_out(lowerCAmelCase__ )
return sample
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase=3 , lowercase=3 , lowercase=("UpDecoderBlock2D",) , lowercase=(64,) , lowercase=2 , lowercase=32 , lowercase="silu" , lowercase="group" , ):
super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1, )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        temb_channels = in_channels if norm_type == "spatial" else None
# mid
        self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__ ):
A_ : str = output_channel
A_ : Tuple = reversed_block_out_channels[i]
A_ : Any = i == len(lowerCAmelCase__ ) - 1
A_ : str = get_up_block(
lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , resnet_time_scale_shift=lowerCAmelCase__ , )
self.up_blocks.append(lowerCAmelCase__ )
A_ : List[str] = output_channel
# out
if norm_type == "spatial":
A_ : Optional[Any] = SpatialNorm(block_out_channels[0] , lowerCAmelCase__ )
else:
A_ : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase__ , eps=1E-6 )
A_ : int = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
A_ : int = False
def _a (self , lowercase , lowercase=None ):
A_ : Any = z
A_ : Union[str, Any] = self.conv_in(lowerCAmelCase__ )
A_ : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase ):
def custom_forward(*lowercase ):
return module(*lowerCAmelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
A_ : List[Any] = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
A_ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
# middle
A_ : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ )
A_ : int = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
A_ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# middle
A_ : List[Any] = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ )
A_ : int = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
A_ : Optional[Any] = up_block(lowerCAmelCase__ , lowerCAmelCase__ )
# post-process
if latent_embeds is None:
A_ : str = self.conv_norm_out(lowerCAmelCase__ )
else:
A_ : Optional[int] = self.conv_norm_out(lowerCAmelCase__ , lowerCAmelCase__ )
A_ : Any = self.conv_act(lowerCAmelCase__ )
A_ : Tuple = self.conv_out(lowerCAmelCase__ )
return sample
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase , lowercase , lowercase , lowercase=None , lowercase="random" , lowercase=False , lowercase=True ):
super().__init__()
A_ : Union[str, Any] = n_e
A_ : Optional[Any] = vq_embed_dim
A_ : int = beta
A_ : Any = legacy
A_ : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ : str = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ : List[Any] = self.used.shape[0]
A_ : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ : List[str] = self.re_embed
A_ : List[str] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A_ : str = n_e
A_ : Union[str, Any] = sane_index_shape
def _a (self , lowercase ):
A_ : List[Any] = inds.shape
assert len(lowerCAmelCase__ ) > 1
A_ : Union[str, Any] = inds.reshape(ishape[0] , -1 )
A_ : Optional[Any] = self.used.to(lowerCAmelCase__ )
A_ : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
A_ : str = match.argmax(-1 )
A_ : int = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ : Optional[Any] = self.unknown_index
return new.reshape(lowerCAmelCase__ )
def _a (self , lowercase ):
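# Inverse of the remapping above: translate indices from the reduced "used" set back into the full codebook index space.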
A_ : str = inds.shape
assert len(lowerCAmelCase__ ) > 1
A_ : Tuple = inds.reshape(ishape[0] , -1 )
A_ : Dict = self.used.to(lowerCAmelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ : Optional[Any] = 0 # simply set to zero
A_ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase__ )
return back.reshape(lowerCAmelCase__ )
def _a (self , lowercase ):
# reshape z -> (batch, height, width, channel) and flatten
A_ : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ : Optional[int] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ : Tuple = torch.argmin(torch.cdist(lowerCAmelCase__ , self.embedding.weight ) , dim=1 )
A_ : Dict = self.embedding(lowerCAmelCase__ ).view(z.shape )
A_ : Dict = None
A_ : Tuple = None
# compute loss for embedding
if not self.legacy:
A_ : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ : Optional[int] = z + (z_q - z).detach()
# reshape back to match original input shape
A_ : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ : Optional[Any] = self.remap_to_used(lowerCAmelCase__ )
A_ : List[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _a (self , lowercase , lowercase ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis
A_ : Optional[Any] = self.unmap_to_all(lowerCAmelCase__ )
A_ : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ : str = self.embedding(lowerCAmelCase__ )
if shape is not None:
A_ : List[str] = z_q.view(lowerCAmelCase__ )
# reshape back to match original input shape
A_ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _lowerCAmelCase ( a__ ):
def __init__(self , lowercase , lowercase=False ):
A_ : Any = parameters
A_ : Tuple = torch.chunk(lowerCAmelCase__ , 2 , dim=1 )
A_ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ : int = deterministic
A_ : Optional[Any] = torch.exp(0.5 * self.logvar )
A_ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
A_ : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _a (self , lowercase = None ):
# make sure sample is on the same device as the parameters and has same dtype
A_ : List[str] = randn_tensor(
self.mean.shape , generator=lowerCAmelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ : List[str] = self.mean + self.std * sample
return x
def _a (self , lowercase=None ):
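# KL divergence of this diagonal Gaussian against `other`, or against the standard normal when `other` is None.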
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _a (self , lowercase , lowercase=[1, 2, 3] ):
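# Negative log-likelihood of `sample` under this diagonal Gaussian, summed over the given dimensions.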
if self.deterministic:
return torch.Tensor([0.0] )
A_ : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase__ )
def _a (self ):
return self.mean
| 709
|
'''simple docstring'''
lowerCamelCase :dict[tuple[int, int, int], int] = {}
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
A_ : Tuple = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
A_ : int = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
A_ : Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
A_ : Optional[int] = _calculate(days - 1 , lowerCamelCase__ , 0 )
A_ : Optional[Any] = state_late + state_absent + state_ontime
A_ : Dict = prizestrings
return prizestrings
def a ( lowerCamelCase__ = 30 ):
'''simple docstring'''
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 686
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """philschmid/bart-large-cnn-samsum"""
_SCREAMING_SNAKE_CASE = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
_SCREAMING_SNAKE_CASE = """summarizer"""
_SCREAMING_SNAKE_CASE = AutoTokenizer
_SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM
_SCREAMING_SNAKE_CASE = ["""text"""]
_SCREAMING_SNAKE_CASE = ["""text"""]
def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase__ , return_tensors='pt' , truncation=UpperCamelCase__ )
def A ( self : str , UpperCamelCase__ : int ):
"""simple docstring"""
return self.model.generate(**UpperCamelCase__ )[0]
def A ( self : Any , UpperCamelCase__ : str ):
"""simple docstring"""
return self.pre_processor.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
| 430
|
'''simple docstring'''
import argparse
import os
import re
_lowerCamelCase : int = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_lowerCamelCase : Union[str, Any] = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_lowerCamelCase : Optional[Any] = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def __lowerCamelCase ( A__ , A__ = False ) -> Any:
"""simple docstring"""
with open(A__ , 'r' , encoding='utf-8' ) as f:
UpperCamelCase = f.read()
UpperCamelCase = content.split('\n' )
UpperCamelCase = []
UpperCamelCase = 0
while line_idx < len(A__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
UpperCamelCase = len(re.search(R'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
UpperCamelCase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
UpperCamelCase = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
UpperCamelCase = sorted(A__ , key=lambda A__ : _re_identifier.search(A__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(A__ ) )
elif "\n".join(A__ ) != content:
return True
def __lowerCamelCase ( A__ = False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [os.path.join(A__ , A__ ) for f in os.listdir(A__ ) if f.endswith('.py' )]
UpperCamelCase = [sort_auto_mapping(A__ , overwrite=A__ ) for fname in fnames]
if not overwrite and any(A__ ):
UpperCamelCase = [f for f, d in zip(A__ , A__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(A__ )}. Run `make style` to fix"""
' this.' )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
_lowerCamelCase : str = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 430
| 1
|
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCAmelCase : Optional[Any] =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ={"""vocab_file""": """spiece.model"""}
_UpperCAmelCase : Union[str, Any] ={
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
_UpperCAmelCase : Optional[int] ={
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : str = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowercase , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase = None , **__lowercase , ) -> None:
lowerCAmelCase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase_ : Union[str, Any] = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' if you are testing the model, this can safely be ignored''' )
lowerCAmelCase_ : Optional[int] = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase_ : Union[str, Any] = '''<|endoftext|>''' if eos_token is None else eos_token
lowerCAmelCase_ : str = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase_ : Tuple = unk_token if pad_token is None else pad_token
lowerCAmelCase_ : str = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase_ : List[str] = '''<pad>''' if pad_token is None else pad_token
lowerCAmelCase_ : List[str] = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , )
lowerCAmelCase_ : Tuple = do_lower_case
lowerCAmelCase_ : Optional[Any] = remove_space
lowerCAmelCase_ : Tuple = keep_accents
lowerCAmelCase_ : Optional[int] = vocab_file
lowerCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowercase )
# Used for whitespace normalization in input texts
# fmt: off
lowerCAmelCase_ : Dict = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase_ : Any = re.compile(
f"""[{"".join(map(__lowercase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = self.__dict__.copy()
lowerCAmelCase_ : Union[str, Any] = None
return state
def __setstate__( self , __lowercase ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowercase_ ( self ) -> int:
return len(self.sp_model )
def lowercase_ ( self , __lowercase ) -> str:
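# Strip non-printing characters, collapse the various Unicode space variants to plain spaces, and apply NFC normalization.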
lowerCAmelCase_ : List[Any] = self.non_printing_characters_re.sub('''''' , __lowercase )
# Normalize whitespaces
lowerCAmelCase_ : str = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in lowerCAmelCase_] )
# NFC Unicode normalization
lowerCAmelCase_ : Any = unicodedata.normalize('''NFC''' , lowerCAmelCase_ )
return lowerCAmelCase_
def lowercase_ ( self , __lowercase , **__lowercase ) -> List[str]:
lowerCAmelCase_ : Optional[int] = self.preprocess_text(__lowercase )
return self.sp_model.encode(__lowercase , out_type=__lowercase )
def lowercase_ ( self , __lowercase ) -> int:
return self.sp_model.PieceToId(__lowercase )
def lowercase_ ( self , __lowercase ) -> str:
return self.sp_model.IdToPiece(__lowercase )
@staticmethod
def lowercase_ ( __lowercase ) -> str:
return out_string
def lowercase_ ( self , __lowercase ) -> str:
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Union[str, Any] = ''''''
lowerCAmelCase_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowercase ) + token
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Dict = []
else:
current_sub_tokens.append(__lowercase )
lowerCAmelCase_ : str = False
out_string += self.sp_model.decode(__lowercase )
return out_string
def lowercase_ ( self ) -> Dict[str, int]:
lowerCAmelCase_ : List[Any] = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not os.path.isdir(__lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : int = os.path.join(
__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowercase , '''wb''' ) as fi:
lowerCAmelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(__lowercase )
return (out_vocab_file,)
def lowercase_ ( self , __lowercase , __lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__lowercase , __lowercase ):
lowerCAmelCase_ : Optional[Any] = self.preprocess_text(__lowercase )
lowerCAmelCase_ : Optional[Any] = self.sp_model.encode(__lowercase )
else:
lowerCAmelCase_ : Any = [self.preprocess_text(__lowercase ) for t in text]
lowerCAmelCase_ : List[str] = self.sp_model.encode(__lowercase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase_ : int = torch.tensor(__lowercase )
return token_ids
def lowercase_ ( self , __lowercase ) -> str:
return self.sp_model.decode(__lowercase )
def lowercase_ ( self , __lowercase ) -> List[int]:
lowerCAmelCase_ : Optional[int] = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowerCAmelCase_ : List[str] = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__lowercase ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=__lowercase )
| 619
|
import math
import qiskit
def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowerCAmelCase_ : Any = [input_a, input_a, carry_in]
lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 619
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
__lowercase : List[Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
__lowercase , __lowercase : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
else:
__lowercase : Dict = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
__lowercase , __lowercase : str = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
__lowercase : List[Any] = ["""key_proj""", """value_proj""", """query_proj"""]
__lowercase : Optional[int] = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
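# Walk each missing key's attribute path, translating old attribute names through `mapping`, and copy the corresponding weights (splitting fused in_proj tensors for query/key/value) from the old checkpoint into the new model.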
for key in loading_info["missing_keys"]:
__lowercase : List[str] = key.split(""".""" )
if attributes[0] == "lm_head":
__lowercase : List[Any] = prophet
__lowercase : List[Any] = prophet_old
else:
__lowercase : List[Any] = prophet.prophetnet
__lowercase : Optional[Any] = prophet_old.model
__lowercase : Any = False
for attribute in attributes:
if attribute in mapping:
__lowercase : Tuple = mapping[attribute]
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
__lowercase : Optional[int] = attribute
elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : int = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowercase : Dict = old_model.weight
logger.info(F"{attribute} is initialized." )
__lowercase : Optional[int] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowercase : int = old_model.bias
logger.info(F"{attribute} is initialized" )
__lowercase : Tuple = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
__lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
__lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowercase : Any = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowercase : Tuple = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowercase : Tuple = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowercase : Any = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowercase : Optional[Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowercase : int = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowercase : int = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowercase : int = True
break
if attribute.isdigit():
__lowercase : int = model[int(lowerCAmelCase_ )]
__lowercase : Optional[int] = old_model[int(lowerCAmelCase_ )]
else:
__lowercase : int = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if old_attribute == "":
__lowercase : Tuple = old_model
else:
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError(F"{old_model} does not have {old_attribute}" )
__lowercase : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 149
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[int] , __a : List[str]=14 , __a : Optional[Any]=7 , __a : List[Any]=True , __a : Tuple=True , __a : Union[str, Any]=True , __a : Any=True , __a : Any=True , __a : Dict=99 , __a : List[Any]=32 , __a : Union[str, Any]=5 , __a : List[Any]=4 , __a : Tuple=37 , __a : Dict="gelu" , __a : Tuple=0.1 , __a : str=0.1 , __a : Optional[int]=512 , __a : Union[str, Any]=16 , __a : Tuple=2 , __a : Tuple=0.02 , __a : List[str]=3 , __a : Tuple=4 , __a : int=None , ) -> int:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : Optional[int] = batch_size
__lowercase : int = seq_length
__lowercase : Any = is_training
__lowercase : str = use_token_type_ids
__lowercase : Dict = use_input_mask
__lowercase : Tuple = use_labels
__lowercase : Optional[Any] = use_mc_token_ids
__lowercase : int = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Any = hidden_act
__lowercase : Optional[Any] = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : str = max_position_embeddings
__lowercase : List[Any] = type_vocab_size
__lowercase : List[str] = type_sequence_label_size
__lowercase : Optional[Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : List[str] = scope
__lowercase : Optional[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Tuple = None
if self.use_token_type_ids:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Dict = None
if self.use_mc_token_ids:
__lowercase : int = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowercase : Tuple = None
__lowercase : int = None
__lowercase : Any = None
if self.use_labels:
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Dict = self.get_config()
__lowercase : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : List[str] , __a : Tuple , __a : str , __a : Optional[int] , __a : Any , __a : Union[str, Any] , *__a : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : int = CTRLModel(config=__a )
model.to(__a )
model.eval()
model(__a , token_type_ids=__a , head_mask=__a )
model(__a , token_type_ids=__a )
__lowercase : int = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase ( self : Any , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : Union[str, Any] , __a : Optional[Any] , *__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : str = CTRLLMHeadModel(__a )
model.to(__a )
model.eval()
__lowercase : Any = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) : int = config_and_inputs
__lowercase : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , __a : int , __a : Dict , __a : str , __a : List[str] , *__a : str ) -> int:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
__lowercase : Optional[Any] = CTRLForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[str] = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_A : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
_A : Dict = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : str = True
_A : List[Any] = False
_A : List[Any] = False
def lowerCAmelCase ( self : int , __a : Tuple , __a : int , __a : str , __a : int , __a : Dict ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = CTRLModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , n_embd=37 )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__a )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__a )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[Any] = CTRLModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : int = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(__a )
__lowercase : str = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=__a ) # Legal the president is
__lowercase : Union[str, Any] = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowercase : List[Any] = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].tolist() , __a )
| 149
| 1
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
a__ : Any = logging.get_logger(__name__)
a__ : Dict = TypeVar('DatasetType', Dataset, IterableDataset)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[DatasetType] , SCREAMING_SNAKE_CASE_ : Optional[List[float]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[DatasetInfo] = None , SCREAMING_SNAKE_CASE_ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , (Dataset, IterableDataset) ):
if isinstance(SCREAMING_SNAKE_CASE_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(SCREAMING_SNAKE_CASE_ )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(SCREAMING_SNAKE_CASE_ ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(SCREAMING_SNAKE_CASE_ ).__name__}." )
if i == 0:
UpperCAmelCase, UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else (IterableDataset, Dataset)
)
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , info=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ , stopping_strategy=SCREAMING_SNAKE_CASE_ )
else:
return _interleave_iterable_datasets(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , info=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ , stopping_strategy=SCREAMING_SNAKE_CASE_ )
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[DatasetType] , SCREAMING_SNAKE_CASE_ : Optional[DatasetInfo] = None , SCREAMING_SNAKE_CASE_ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE_ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , (Dataset, IterableDataset) ):
if isinstance(SCREAMING_SNAKE_CASE_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(SCREAMING_SNAKE_CASE_ )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(SCREAMING_SNAKE_CASE_ ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(SCREAMING_SNAKE_CASE_ ).__name__}." )
if i == 0:
UpperCAmelCase, UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else (IterableDataset, Dataset)
)
elif not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(SCREAMING_SNAKE_CASE_ , info=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ )
else:
return _concatenate_iterable_datasets(SCREAMING_SNAKE_CASE_ , info=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ , axis=SCREAMING_SNAKE_CASE_ )
| 717
|
'''simple docstring'''
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase = _modexpt(SCREAMING_SNAKE_CASE_ , exponent // 2 , SCREAMING_SNAKE_CASE_ ) % modulo_value
return (UpperCAmelCase * UpperCAmelCase) % modulo_value
else:
return (base * _modexpt(SCREAMING_SNAKE_CASE_ , exponent - 1 , SCREAMING_SNAKE_CASE_ )) % modulo_value
def __snake_case ( SCREAMING_SNAKE_CASE_ : int = 1_777 , SCREAMING_SNAKE_CASE_ : int = 1_855 , SCREAMING_SNAKE_CASE_ : int = 8 ) -> int:
"""simple docstring"""
UpperCAmelCase = base
for _ in range(1 , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = _modexpt(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 10**digits )
return UpperCAmelCase
if __name__ == "__main__":
print(F"""{solution() = }""")
| 570
| 0
|
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCAmelCase ( _UpperCAmelCase : int = 3 ) -> qiskit.result.counts.Counts:
if isinstance(_UpperCAmelCase , str ):
raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(_UpperCAmelCase ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
__snake_case = QuantumRegister(_UpperCAmelCase , "qr" )
__snake_case = ClassicalRegister(_UpperCAmelCase , "cr" )
__snake_case = QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = number_of_qubits
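# Standard QFT construction: apply a Hadamard to each qubit followed by controlled-phase rotations with halving angles, then swap qubits to reverse the output order.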
for i in range(_UpperCAmelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_UpperCAmelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _UpperCAmelCase , _UpperCAmelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_UpperCAmelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_UpperCAmelCase , _UpperCAmelCase )
# simulate with 10000 shots
__snake_case = Aer.get_backend("qasm_simulator" )
__snake_case = execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_00_00 )
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 69
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_ = get_logger(__name__)
lowerCAmelCase_ = Path(__file__).parent / '''model_card_template.md'''
lowerCAmelCase_ = uuida().hex
lowerCAmelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None ):
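# Build the user-agent string sent with Hub requests, appending installed framework versions and honoring the telemetry opt-out flags.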
snake_case_ = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
ua += "; " + user_agent
return ua
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
if token is None:
snake_case_ = HfFolder.get_token()
if organization is None:
snake_case_ = whoami(SCREAMING_SNAKE_CASE__ )['''name''']
return F'''{username}/{model_id}'''
else:
return F'''{organization}/{model_id}'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(SCREAMING_SNAKE_CASE__ , '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
snake_case_ = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , '''hub_token''' ) else None
snake_case_ = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
snake_case_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''gradient_accumulation_steps''' ) else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , )
snake_case_ = os.path.join(args.output_dir , '''README.md''' )
model_card.save(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
snake_case_ = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() )
snake_case_ = re.search(R'''snapshots/([^/]+)/''' , SCREAMING_SNAKE_CASE__ )
if search is None:
return None
snake_case_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_ = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
lowerCAmelCase_ = os.path.join(hf_cache_home, '''diffusers''')
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None ):
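# Migrate blobs from the pre-0.14.0 diffusers cache layout into the new cache directory, leaving symlinks behind so the old paths keep working.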
if new_cache_dir is None:
snake_case_ = DIFFUSERS_CACHE
if old_cache_dir is None:
snake_case_ = old_diffusers_cache
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
snake_case_ = Path(SCREAMING_SNAKE_CASE__ ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
snake_case_ = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ )
new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
try:
os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
lowerCAmelCase_ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_ = int(f.read())
except ValueError:
lowerCAmelCase_ = 0
if cache_version < 1:
lowerCAmelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
if variant is not None:
snake_case_ = weights_name.split('''.''' )
snake_case_ = splits[:-1] + [variant] + splits[-1:]
snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ )
return weights_name
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , *,
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ):
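# Resolve a weights file: prefer local files/directories, otherwise download from the Hub, handling the deprecated revision-as-variant pattern and converting Hub errors into descriptive EnvironmentErrors.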
snake_case_ = str(SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
return pretrained_model_name_or_path
elif os.path.isdir(SCREAMING_SNAKE_CASE__ ):
if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
# Load from a PyTorch checkpoint
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse('''0.20.0''' )
):
try:
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE__ , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE__ , )
try:
# 2. Load model file as usual
snake_case_ = hf_hub_download(
SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 39
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
A = TypeVar('T')
class UpperCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , snake_case : T ) -> None:
'''simple docstring'''
A = data
A = self
A = 0
class UpperCAmelCase__ ( Generic[T] ):
def __init__( self : str ) -> None:
'''simple docstring'''
A = {}
def A_ ( self : List[str] , snake_case : T ) -> None:
'''simple docstring'''
A = DisjointSetTreeNode(snake_case )
def A_ ( self : Dict , snake_case : T ) -> DisjointSetTreeNode[T]:
'''simple docstring'''
A = self.map[data]
if elem_ref != elem_ref.parent:
A = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def A_ ( self : List[Any] , snake_case : DisjointSetTreeNode[T] , snake_case : DisjointSetTreeNode[T] ) -> None:
'''simple docstring'''
if nodea.rank > nodea.rank:
A = nodea
else:
A = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def A_ ( self : str , snake_case : T , snake_case : T ) -> None:
'''simple docstring'''
self.link(self.find_set(snake_case ) , self.find_set(snake_case ) )
class UpperCAmelCase__ ( Generic[T] ):
def __init__( self : Optional[int] ) -> None:
'''simple docstring'''
A = {}
def A_ ( self : List[str] , snake_case : T ) -> None:
'''simple docstring'''
if node not in self.connections:
A = {}
def A_ ( self : Optional[Any] , snake_case : T , snake_case : T , snake_case : int ) -> None:
'''simple docstring'''
self.add_node(snake_case )
self.add_node(snake_case )
A = weight
A = weight
def A_ ( self : Optional[Any] ) -> GraphUndirectedWeighted[T]:
'''simple docstring'''
A = []
A = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x: x[2] )
# creating the disjoint set
A = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(snake_case )
# MST generation
A = 0
A = 0
A = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
A , A , A = edges[index]
index += 1
A = disjoint_set.find_set(snake_case )
A = disjoint_set.find_set(snake_case )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(snake_case , snake_case , snake_case )
disjoint_set.union(snake_case , snake_case )
return graph
| 109
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 109
| 1
|
from __future__ import annotations
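# A1Z26 cipher: each lowercase letter is encoded as its position in the alphabet (a=1, ..., z=26).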
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded: ''', decode(encoded))


if __name__ == "__main__":
    main()
| 10
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
UpperCamelCase_ : Union[str, Any] = '''CIDAS/clipseg-rd64-refined'''
UpperCamelCase_ : Any = '''image_segmenter'''
UpperCamelCase_ : int = CLIPSegForImageSegmentation
UpperCamelCase_ : Optional[Any] = ['''image''', '''text''']
UpperCamelCase_ : int = ['''image''']
def __init__( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ["vision"] )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Tuple , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors="pt" )
def _A ( self : str , UpperCAmelCase_ : Optional[Any] ):
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = self.model(**UpperCAmelCase_ ).logits
return logits
def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[str] ):
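        # Convert the logits into a binary segmentation mask and return it as a grayscale PIL image.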
SCREAMING_SNAKE_CASE : Optional[int] = outputs.cpu().detach().numpy()
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : str = 1
        return Image.fromarray((array * 255).astype(np.uint8) )
| 62
| 0
|
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(selection_sort(unsorted))
| 705
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 513
| 0
|
import re
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
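    # Validate that the strand contains only the bases A, T, C and G, then return the complementary strand.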
if len(re.findall("[ATCG]" , lowerCAmelCase_)) != len(lowerCAmelCase_):
raise ValueError("Invalid Strand")
    return lowerCAmelCase_.translate(str.maketrans("ATCG" , "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250
|
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 250
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase : str = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 288
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : Optional[int] ):
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''
def __snake_case ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __snake_case ( self : Any ):
lowerCAmelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCAmelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCAmelCase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCAmelCase__ = 35
lowerCAmelCase__ = 2
lowerCAmelCase__ = 8
lowerCAmelCase__ = {
'''semantic_prompt''': np.ones(SCREAMING_SNAKE_CASE_ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCAmelCase__ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCAmelCase__ = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCAmelCase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = processor(text=self.input_string )
lowerCAmelCase__ = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 288
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , _a : int , _a : MutableSequence[float] ) -> None:
if len(_a ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
__lowerCamelCase : list[float] = list(_a )
__lowerCamelCase : Dict = degree
def __add__( self : Tuple , _a : Polynomial ) -> Polynomial:
if self.degree > polynomial_a.degree:
__lowerCamelCase : Optional[Any] = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _a )
else:
__lowerCamelCase : Optional[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _a )
def __sub__( self : Dict , _a : Polynomial ) -> Polynomial:
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Any ) -> Polynomial:
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Optional[Any] , _a : Polynomial ) -> Polynomial:
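        # Polynomial multiplication by convolving the coefficient lists: result[i + j] += a[i] * b[j].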
__lowerCamelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _a )
def _lowercase ( self : Tuple , _a : int | float ) -> int | float:
__lowerCamelCase : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Any ) -> str:
__lowerCamelCase : Optional[int] = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_a )
return polynomial
def __repr__( self : Tuple ) -> str:
return self.__str__()
def _lowercase ( self : Any ) -> Polynomial:
__lowerCamelCase : list[float] = [0] * self.degree
for i in range(self.degree ):
__lowerCamelCase : Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _a )
def _lowercase ( self : int , _a : int | float = 0 ) -> Polynomial:
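        # Indefinite integral: coefficient i moves to position i + 1 and is divided by (i + 1); index 0 holds the integration constant.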
__lowerCamelCase : list[float] = [0] * (self.degree + 2)
__lowerCamelCase : List[Any] = constant
for i in range(self.degree + 1 ):
__lowerCamelCase : Optional[int] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _a )
def __eq__( self : int , _a : object ) -> bool:
if not isinstance(_a , _a ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] , _a : object ) -> bool:
return not self.__eq__(_a )
| 459
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _a : CLIPSegForImageSegmentation , _a : CLIPSegProcessor , _a : AutoencoderKL , _a : CLIPTextModel , _a : CLIPTokenizer , _a : UNetaDConditionModel , _a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _a : StableDiffusionSafetyChecker , _a : CLIPImageProcessor , ) -> int:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
__lowerCamelCase : int = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                'to update the config accordingly as leaving `steps_offset` might lead to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , _a , standard_warn=_a )
__lowerCamelCase : int = dict(scheduler.config )
__lowerCamelCase : Any = 1
__lowerCamelCase : List[Any] = FrozenDict(_a )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
__lowerCamelCase : str = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , _a , standard_warn=_a )
__lowerCamelCase : Dict = dict(scheduler.config )
__lowerCamelCase : Tuple = True
__lowerCamelCase : Union[str, Any] = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def _lowercase ( self : Optional[int] , _a : Optional[Union[str, int]] = "auto" ) -> List[str]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _lowercase ( self : Optional[Any] ) -> int:
self.enable_attention_slicing(_a )
def _lowercase ( self : str ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCamelCase : Dict = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self : Dict ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : List[str] , _a : Union[str, List[str]] , _a : Union[torch.FloatTensor, PIL.Image.Image] , _a : str , _a : int = 512 , _a : int = 512 , _a : int = 50 , _a : float = 7.5 , _a : Optional[Union[str, List[str]]] = None , _a : Optional[int] = 1 , _a : float = 0.0 , _a : Optional[torch.Generator] = None , _a : Optional[torch.FloatTensor] = None , _a : Optional[str] = "pil" , _a : bool = True , _a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _a : int = 1 , **_a : Optional[Any] , ) -> int:
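        # Stage 1: use CLIPSeg to turn the text query into a segmentation mask; Stage 2: inpaint the masked region with Stable Diffusion.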
__lowerCamelCase : Union[str, Any] = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
__lowerCamelCase : Union[str, Any] = self.segmentation_model(**_a )
__lowerCamelCase : List[str] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__lowerCamelCase : Optional[Any] = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__lowerCamelCase : Tuple = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 459
| 1
|
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase : Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = 'AutoTokenizer'
lowerCAmelCase = ['tokenizer']
lowerCAmelCase = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None ) -> List[Any]:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE )
lowerCAmelCase = speaker_embeddings
@classmethod
def __A ( cls : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any="speaker_embeddings_path.json" , **SCREAMING_SNAKE_CASE : List[str] ) -> Dict:
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
lowerCAmelCase = get_file_from_repo(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , subfolder=kwargs.pop("subfolder" , SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("cache_dir" , SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("force_download" , SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("proxies" , SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("resume_download" , SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("local_files_only" , SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("use_auth_token" , SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("revision" , SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase = None
else:
with open(SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCAmelCase = json.load(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = None
lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return cls(tokenizer=SCREAMING_SNAKE_CASE , speaker_embeddings=SCREAMING_SNAKE_CASE )
def __A ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any]="speaker_embeddings_path.json" , SCREAMING_SNAKE_CASE : Optional[int]="speaker_embeddings" , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : List[str] , ) -> str:
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "v2" ) , exist_ok=SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , SCREAMING_SNAKE_CASE , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE , f"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
super().save_pretrained(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __A ( self : Any , SCREAMING_SNAKE_CASE : str = None , **SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = self.speaker_embeddings[voice_preset]
lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("cache_dir" , SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("force_download" , SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("proxies" , SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("resume_download" , SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("local_files_only" , SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("use_auth_token" , SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("revision" , SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCAmelCase = np.load(SCREAMING_SNAKE_CASE )
return voice_preset_dict
def __A ( self : str , SCREAMING_SNAKE_CASE : Optional[dict] = None ) -> Any:
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self : str , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict="pt" , SCREAMING_SNAKE_CASE : Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Dict=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
if voice_preset is not None and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase = self._load_voice_preset(SCREAMING_SNAKE_CASE )
else:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not voice_preset.endswith(".npz" ):
lowerCAmelCase = voice_preset + ".npz"
lowerCAmelCase = np.load(SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.tokenizer(
SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , padding="max_length" , max_length=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCAmelCase = voice_preset
return encoded_text
| 159
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 159
| 1
|
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCamelCase__ ( lowerCamelCase_):
"""simple docstring"""
a__ : Dict = """microsoft/speecht5_tts"""
a__ : str = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
a__ : Dict = """text_reader"""
a__ : List[Any] = SpeechTaProcessor
a__ : Union[str, Any] = SpeechTaForTextToSpeech
a__ : Any = SpeechTaHifiGan
a__ : int = ["""text"""]
a__ : Optional[int] = ["""audio"""]
def snake_case_ ( self : Tuple ) -> str:
if self.post_processor is None:
_A = '''microsoft/speecht5_hifigan'''
super().setup()
def snake_case_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int=None ) -> Tuple:
_A = self.pre_processor(text=lowerCAmelCase__ , return_tensors='''pt''' , truncation=lowerCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
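            # Fall back to a default x-vector speaker embedding taken from the CMU Arctic xvectors dataset.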
_A = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
_A = torch.tensor(embeddings_dataset[73_05]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case_ ( self : List[str] , __lowerCAmelCase : List[Any] ) -> Tuple:
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase__ )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : List[Any] ) -> int:
with torch.no_grad():
return self.post_processor(lowerCAmelCase__ ).cpu().detach()
| 2
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__UpperCamelCase = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__UpperCamelCase = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
__UpperCamelCase = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __A ( self ) -> List[str]:
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = CHRF.CHAR_ORDER , lowerCAmelCase__ = CHRF.WORD_ORDER , lowerCAmelCase__ = CHRF.BETA , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , ) -> Optional[Any]:
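        # sacrebleu expects one stream of references per reference index, so transpose the per-prediction reference lists before scoring.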
SCREAMING_SNAKE_CASE = len(references[0] )
if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )]
SCREAMING_SNAKE_CASE = CHRF(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = sb_chrf.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 247
| 0
|
"""simple docstring"""
def __lowercase ( a : List[str] , a : Union[str, Any] ) -> Optional[int]:
__snake_case : Optional[int] =''''''
for i in table:
res += inp[i - 1]
return res
def __lowercase ( a : List[str] ) -> List[str]:
return data[1:] + data[0]
def __lowercase ( a : Tuple , a : List[Any] ) -> str:
__snake_case : Union[str, Any] =''''''
for i in range(len(a ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __lowercase ( a : Dict , a : Union[str, Any] ) -> Optional[int]:
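    # S-box lookup: the first and last bits select the row, the middle two bits select the column.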
__snake_case : List[Any] =int('''0b''' + data[0] + data[-1] , 2 )
__snake_case : Dict =int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def __lowercase ( a : Optional[Any] , a : Any , a : Optional[int] , a : List[str] , a : int ) -> Optional[Any]:
__snake_case : Optional[Any] =message[:4]
__snake_case : Dict =message[4:]
__snake_case : str =apply_table(a , a )
__snake_case : Optional[int] =xor(a , a )
__snake_case : str =apply_sbox(a , temp[:4] ) # noqa: E741
__snake_case : int =apply_sbox(a , temp[4:] )
__snake_case : List[str] ='''0''' * (2 - len(a )) + l # noqa: E741
__snake_case : List[str] ='''0''' * (2 - len(a )) + r
__snake_case : int =apply_table(l + r , a )
__snake_case : Optional[int] =xor(a , a )
return temp + right
if __name__ == "__main__":
UpperCamelCase_ : Tuple = input("""Enter 10 bit key: """)
UpperCamelCase_ : List[Any] = input("""Enter 8 bit message: """)
UpperCamelCase_ : str = [6, 3, 7, 4, 8, 5, 10, 9]
UpperCamelCase_ : List[str] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
UpperCamelCase_ : Tuple = [2, 4, 3, 1]
UpperCamelCase_ : Dict = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCamelCase_ : int = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCamelCase_ : List[str] = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCamelCase_ : Dict = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCamelCase_ : Optional[int] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCamelCase_ : Optional[int] = apply_table(key, paa_table)
UpperCamelCase_ : Optional[int] = temp[:5]
UpperCamelCase_ : Dict = temp[5:]
UpperCamelCase_ : List[Any] = left_shift(left)
UpperCamelCase_ : Optional[int] = left_shift(right)
UpperCamelCase_ : List[str] = apply_table(left + right, pa_table)
UpperCamelCase_ : Optional[Any] = left_shift(left)
UpperCamelCase_ : List[str] = left_shift(right)
UpperCamelCase_ : Optional[int] = left_shift(left)
UpperCamelCase_ : Optional[Any] = left_shift(right)
UpperCamelCase_ : Dict = apply_table(left + right, pa_table)
# encryption
UpperCamelCase_ : Any = apply_table(message, IP)
UpperCamelCase_ : Any = function(expansion, sa, sa, keya, temp)
UpperCamelCase_ : Any = temp[4:] + temp[:4]
UpperCamelCase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
UpperCamelCase_ : Any = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
UpperCamelCase_ : List[Any] = apply_table(CT, IP)
UpperCamelCase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
UpperCamelCase_ : Optional[int] = temp[4:] + temp[:4]
UpperCamelCase_ : List[str] = function(expansion, sa, sa, keya, temp)
UpperCamelCase_ : Tuple = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 497
|
"""simple docstring"""
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F'''{solution() = }''')
| 497
| 1
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self : int , __a : List[str] , __a : str=1_3 , __a : Tuple=7 , __a : int=True , __a : Any=True , __a : Optional[int]=False , __a : Optional[Any]=True , __a : List[str]=9_9 , __a : Optional[Any]=3_2 , __a : Optional[Any]=5 , __a : Optional[int]=4 , __a : List[Any]=3_7 , __a : Any="gelu" , __a : Optional[Any]=0.1 , __a : str=0.1 , __a : Tuple=5_1_2 , __a : Union[str, Any]=1_6 , __a : int=2 , __a : int=0.0_2 , __a : Any=3 , __a : Optional[int]=4 , __a : Union[str, Any]=None , ) -> Optional[int]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def snake_case__ ( self : Tuple , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : Dict , __a : str , __a : Optional[Any] , __a : int ) -> Optional[int]:
__UpperCAmelCase = BioGptModel(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a )
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Dict , __a : str , __a : Optional[int] , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : List[Any] , __a : Optional[int] , ) -> Optional[Any]:
__UpperCAmelCase = BioGptForCausalLM(config=__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Dict , __a : List[Any] , __a : Tuple , __a : Any , __a : Tuple , __a : Any , *__a : Dict ) -> Union[str, Any]:
__UpperCAmelCase = BioGptModel(config=__a )
model.to(__a )
model.eval()
# create attention mask
__UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__a )
__UpperCAmelCase = self.seq_length // 2
__UpperCAmelCase = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase = model(__a , attention_mask=__a ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
__UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__UpperCAmelCase = ids_tensor((1,) , __a ).item() + 1
__UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__UpperCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a )] , dim=1 , )
# get two different outputs
__UpperCAmelCase = model(__a , attention_mask=__a )['''last_hidden_state''']
__UpperCAmelCase = model(__a , past_key_values=__a , attention_mask=__a )['''last_hidden_state''']
# select random slice
__UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : Tuple , __a : Dict , __a : int , __a : Dict , *__a : int ) -> Optional[Any]:
__UpperCAmelCase = BioGptModel(config=__a ).to(__a ).eval()
__UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__a )
# first forward pass
__UpperCAmelCase = model(__a , attention_mask=__a , use_cache=__a )
__UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attn_mask
__UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__UpperCAmelCase = model(__a , attention_mask=__a )['''last_hidden_state''']
__UpperCAmelCase = model(__a , attention_mask=__a , past_key_values=__a )[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def snake_case__ ( self : List[Any] , __a : Any , __a : Optional[Any] , __a : Optional[int] , __a : Dict , __a : Optional[Any] , *__a : Any , __a : Union[str, Any]=False ) -> Union[str, Any]:
__UpperCAmelCase = BioGptForCausalLM(__a )
model.to(__a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case__ ( self : Union[str, Any] , __a : int , *__a : List[Any] ) -> List[str]:
__UpperCAmelCase = BioGptModel(__a )
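        # Residual ("c_proj") projection weights are expected to be initialized with std = initializer_range / sqrt(2 * num_hidden_layers).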
__UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def snake_case__ ( self : Tuple , __a : int , __a : List[str] , __a : Any , __a : str , __a : Optional[Any] , *__a : Tuple ) -> Union[str, Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = BioGptForTokenClassification(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Union[str, Any] ) -> int:
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
a_ = (BioGptForCausalLM,) if is_torch_available() else ()
a_ = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
def snake_case__ ( self : Tuple ) -> List[Any]:
__UpperCAmelCase = BioGptModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def snake_case__ ( self : int ) -> int:
self.config_tester.run_common_tests()
def snake_case__ ( self : Tuple ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case__ ( self : List[Any] ) -> List[str]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase = type
self.model_tester.create_and_check_model(*__a )
def snake_case__ ( self : List[str] ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a )
def snake_case__ ( self : str ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__a , gradient_checkpointing=__a )
def snake_case__ ( self : int ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a )
def snake_case__ ( self : Optional[Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__a )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__a )
@slow
def snake_case__ ( self : int ) -> Tuple:
__UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__a )
__UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__UpperCAmelCase = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase = tokenizer.eos_token
__UpperCAmelCase = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase = tokenizer(__a , return_tensors='''pt''' , padding=__a )
__UpperCAmelCase = inputs['''input_ids'''].to(__a )
__UpperCAmelCase = model.generate(
input_ids=__a , attention_mask=inputs['''attention_mask'''].to(__a ) , )
__UpperCAmelCase = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__a )
__UpperCAmelCase = model.generate(input_ids=__a )
__UpperCAmelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__a )
__UpperCAmelCase = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings )
__UpperCAmelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a )
__UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
__UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
__UpperCAmelCase = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
@slow
def snake_case__ ( self : Any ) -> Dict:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = BioGptModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = 3
__UpperCAmelCase = input_dict['''input_ids''']
__UpperCAmelCase = input_ids.ne(1 ).to(__a )
__UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase = BioGptForSequenceClassification(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = 3
__UpperCAmelCase = '''multi_label_classification'''
__UpperCAmelCase = input_dict['''input_ids''']
__UpperCAmelCase = input_ids.ne(1 ).to(__a )
__UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCAmelCase = BioGptForSequenceClassification(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Optional[int]:
__UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
__UpperCAmelCase = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = 4_2_3_8_4
__UpperCAmelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __a )
__UpperCAmelCase = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@slow
def snake_case__ ( self : Dict ) -> List[str]:
__UpperCAmelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__UpperCAmelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(__a )
torch.manual_seed(0 )
__UpperCAmelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__a )
__UpperCAmelCase = model.generate(
**__a , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=__a , )
__UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
__UpperCAmelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__a , __a )
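# Added sketch (not part of the original tests): the same "microsoft/biogpt" checkpoint exercised
# above can also be driven through the high-level text-generation pipeline. This is a minimal,
# hedged example; it assumes network access to download the checkpoint and its tokenizer dependencies.
from transformers import pipeline
biogpt_generator = pipeline("text-generation", model="microsoft/biogpt")
# Greedy decoding keeps the output deterministic, mirroring the generation checks in the test above.
print(biogpt_generator("COVID-19 is", max_new_tokens=20, do_sample=False)[0]["generated_text"])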
| 262
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A :
def __init__( self : List[str] , __a : List[Any] , __a : Dict=None , __a : str=None , __a : List[Any]=None , __a : Union[str, Any]="resnet50" , __a : List[Any]=3 , __a : List[Any]=3_2 , __a : List[Any]=3 , __a : Optional[int]=True , __a : str=True , ) -> Any:
__UpperCAmelCase = parent
__UpperCAmelCase = out_indices if out_indices is not None else [4]
__UpperCAmelCase = stage_names
__UpperCAmelCase = out_features
__UpperCAmelCase = backbone
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = use_pretrained_backbone
__UpperCAmelCase = is_training
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = self.get_config()
return config, pixel_values
def snake_case__ ( self : Union[str, Any] ) -> str:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def snake_case__ ( self : int , __a : Union[str, Any] , __a : List[Any] ) -> Tuple:
__UpperCAmelCase = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def snake_case__ ( self : Dict ) -> Tuple:
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (TimmBackbone,) if is_torch_available() else ()
a_ = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
def snake_case__ ( self : Optional[int] ) -> List[str]:
__UpperCAmelCase = TimmBackboneModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a )
def snake_case__ ( self : List[str] ) -> int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[str] ) -> str:
__UpperCAmelCase = '''resnet18'''
__UpperCAmelCase = '''microsoft/resnet-18'''
__UpperCAmelCase = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__UpperCAmelCase = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__UpperCAmelCase = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__UpperCAmelCase = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def snake_case__ ( self : str ) -> Optional[Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def snake_case__ ( self : str ) -> List[Any]:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def snake_case__ ( self : str ) -> int:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def snake_case__ ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def snake_case__ ( self : str ) -> Dict:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def snake_case__ ( self : Union[str, Any] ) -> Dict:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def snake_case__ ( self : str ) -> List[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def snake_case__ ( self : Tuple ) -> int:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def snake_case__ ( self : List[str] ) -> Dict:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def snake_case__ ( self : int ) -> int:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self : int ) -> int:
pass
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(__a )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = True
__UpperCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__UpperCAmelCase = self.all_model_classes[0]
__UpperCAmelCase = model_class(__a )
model.to(__a )
__UpperCAmelCase = self._prepare_for_class(__a , __a )
__UpperCAmelCase = model(**__a )
__UpperCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__UpperCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def snake_case__ ( self : Any ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__UpperCAmelCase = copy.deepcopy(__a )
__UpperCAmelCase = None
__UpperCAmelCase = model_class(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__UpperCAmelCase = copy.deepcopy(__a )
__UpperCAmelCase = False
__UpperCAmelCase = model_class(__a )
model.to(__a )
model.eval()
__UpperCAmelCase = model(**__a )
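# Added usage sketch (not from the original file): the tests above exercise TimmBackbone through
# AutoBackbone. A minimal standalone example looks like the following; it assumes `timm` is installed
# and that the "resnet18" weights can be downloaded.
import torch
from transformers import AutoBackbone
backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.randn(1, 3, 224, 224)  # dummy image batch
outputs = backbone(pixel_values)
# One feature map per requested stage, with channel counts reported by backbone.channels.
print([tuple(feature_map.shape) for feature_map in outputs.feature_maps], backbone.channels)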
| 262
| 1
|
def is_arithmetic_series(series: list) -> bool:
    """Check whether the input list is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True

def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
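# Added usage sketch (not in the original file), exercising the helpers defined above:
example_series = [2, 4, 6, 8]
assert is_arithmetic_series(example_series) is True   # constant difference of 2
assert is_arithmetic_series([2, 4, 7]) is False        # the difference changes, so not arithmetic
assert arithmetic_mean(example_series) == 5.0          # (2 + 4 + 6 + 8) / 4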
| 713
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size for training."} )
a = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a = field(
default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
a = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
a = field(default="cosine" , metadata={"help": "Learning rate scheduler type."} )
a = field(
default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
a = field(
default=A__ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} )
a = field(default=1 , metadata={"help": "Training seed."} )
a = field(
default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a = field(
default=A__ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a = field(default=A__ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
a = field(
default=A__ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a = field(
default=A__ , metadata={"help": "Sample from the language model's output distribution."} )
a = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} )
a = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a = field(default=0.9_5 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
a = field(
default=2_00 , metadata={"help": "Number of completions to generate for each sample."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default=A__ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a = field(
default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(
default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a = field(
default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a = field(
default=0.2_5 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a = field(
default=A__ , metadata={"help": "If True, near-duplicate samples are removed."} )
a = field(
default=0.8_5 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} )
a = field(
default=3_27_68 , metadata={"help": "Vocabulary size of the new tokenizer."} )
a = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
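# Added sketch (not part of the original module): dataclasses like the ones above are meant to be
# consumed with transformers.HfArgumentParser, which turns every field into a CLI flag. The class
# name `TrainingArguments` below is a stand-in, since the real class names are obfuscated in this dump.
from dataclasses import dataclass, field
from transformers import HfArgumentParser
@dataclass
class TrainingArguments:
    model_ckpt: str = field(default="codeparrot/codeparrot", metadata={"help": "Model name or path."})
    train_batch_size: int = field(default=2, metadata={"help": "Batch size for training."})
parser = HfArgumentParser(TrainingArguments)
# parse_args_into_dataclasses normally reads sys.argv; an explicit list keeps the sketch self-contained.
(train_args,) = parser.parse_args_into_dataclasses(args=["--train_batch_size", "8"])
print(train_args.model_ckpt, train_args.train_batch_size)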
| 472
| 0
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase__ :Dict = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Tuple:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
__UpperCAmelCase : str = k.replace(UpperCAmelCase_ , UpperCAmelCase_ )
return k
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->PegasusForConditionalGeneration:
"""simple docstring"""
__UpperCAmelCase : Tuple = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = PegasusConfig(**UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = PegasusForConditionalGeneration(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = torch_model.model.state_dict()
__UpperCAmelCase : List[Any] = {}
for k, v in tf_weights.items():
__UpperCAmelCase : Optional[Any] = rename_state_dict_key(UpperCAmelCase_ )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
__UpperCAmelCase : Optional[Any] = v.T
__UpperCAmelCase : Union[str, Any] = torch.tensor(UpperCAmelCase_ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
__UpperCAmelCase : Dict = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__UpperCAmelCase : Dict = mapping['''shared.weight''']
__UpperCAmelCase : str = mapping['''shared.weight''']
__UpperCAmelCase : Optional[Any] = {k: torch.zeros_like(UpperCAmelCase_ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**UpperCAmelCase_ )
__UpperCAmelCase , __UpperCAmelCase : str = torch_model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def lowerCamelCase_ ( UpperCAmelCase_="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : List[Any] = tf.train.list_variables(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(UpperCAmelCase_ , desc='''converting tf checkpoint to dict''' ):
__UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
__UpperCAmelCase : List[Any] = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = array
return tf_weights
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = Path(UpperCAmelCase_ ).parent.name
__UpperCAmelCase : Tuple = task_specific_params[f'''summarization_{dataset}''']['''max_position_embeddings''']
__UpperCAmelCase : List[str] = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=UpperCAmelCase_ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase_ )
# convert model
__UpperCAmelCase : Optional[Any] = get_tf_weights_as_numpy(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
__UpperCAmelCase : Union[str, Any] = task_specific_params
__UpperCAmelCase : Union[str, Any] = convert_pegasus(UpperCAmelCase_ , UpperCAmelCase_ )
torch_model.save_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Dict = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(UpperCAmelCase_ , Path(UpperCAmelCase_ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
lowercase__ :Optional[Any] = parser.parse_args()
if args.save_dir is None:
lowercase__ :Optional[Any] = Path(args.tf_ckpt_path).parent.name
lowercase__ :Tuple = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
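# Added note (not part of the original script): after conversion, the directory written by
# convert_pegasus_ckpt_to_pytorch can be loaded back with the regular transformers API. The path
# below is only illustrative, i.e. os.path.join("pegasus", dataset) as computed above.
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
converted_dir = "pegasus/aeslc"  # hypothetical save_dir produced by this script
model = PegasusForConditionalGeneration.from_pretrained(converted_dir)
tokenizer = PegasusTokenizer.from_pretrained(converted_dir)
print(model.config.max_position_embeddings, tokenizer.model_max_length)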
| 522
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowercase__ :Union[str, Any] = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
lowercase__ :List[Any] = parser.parse_args()
lowercase__ :Tuple = 'cpu'
lowercase__ :Any = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
lowercase__ :Optional[int] = 'path-to-your-trained-model'
lowercase__ :Union[str, Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowercase__ :Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowercase__ :List[str] = pipe.to(device)
# to channels last
lowercase__ :List[Any] = pipe.unet.to(memory_format=torch.channels_last)
lowercase__ :Dict = pipe.vae.to(memory_format=torch.channels_last)
lowercase__ :str = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowercase__ :Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowercase__ :Optional[int] = torch.randn(2, 4, 6_4, 6_4)
lowercase__ :str = torch.rand(1) * 9_9_9
lowercase__ :Optional[int] = torch.randn(2, 7_7, 7_6_8)
lowercase__ :str = (sample, timestep, encoder_hidden_status)
try:
lowercase__ :Dict = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowercase__ :Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowercase__ :Optional[int] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowercase__ :Any = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowercase__ :Tuple = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowercase__ :Any = 6_6_6
lowercase__ :Tuple = torch.Generator(device).manual_seed(seed)
lowercase__ :Tuple = {'generator': generator}
if args.steps is not None:
lowercase__ :List[str] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowercase__ :List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
| 522
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
_SCREAMING_SNAKE_CASE : ClassVar[Features] = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
_SCREAMING_SNAKE_CASE : str = "question"
_SCREAMING_SNAKE_CASE : str = "context"
_SCREAMING_SNAKE_CASE : str = "answers"
@property
def a ( self : Tuple ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
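# Added sketch (not from the original module): the Features schema declared by this task template
# corresponds to the explicit `datasets` feature specification below, shown on a tiny in-memory dataset.
from datasets import Dataset, Features, Sequence, Value
qa_features = Features(
    {
        "question": Value("string"),
        "context": Value("string"),
        "answers": Sequence({"text": Value("string"), "answer_start": Value("int32")}),
    }
)
tiny_qa = Dataset.from_dict(
    {
        "question": ["Where is the Eiffel Tower?"],
        "context": ["The Eiffel Tower is in Paris."],
        "answers": [{"text": ["Paris"], "answer_start": [23]}],
    },
    features=qa_features,
)
print(tiny_qa.features)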
| 330
|
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the ways to give every person one distinct task, using bitmask dynamic programming."""
    def __init__(self, task_performed: list, total: int) -> None:
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask: int, task_no: int) -> int:
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed: list) -> int:
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
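# Added cross-check (not part of the original file): the bitmask DP above can be validated against a
# brute force that enumerates every assignment of distinct tasks to the persons.
from itertools import permutations
allowed = [[1, 3, 4], [1, 2, 5], [3, 4]]
brute_force = sum(
    1
    for assignment in permutations(range(1, 6), len(allowed))
    if all(task in allowed[person] for person, task in enumerate(assignment))
)
dp_answer = AssignmentUsingBitmask(allowed, 5).count_no_of_ways(allowed)
assert brute_force == dp_answer  # both count the same valid assignments (10 for this input)
print(brute_force, dp_answer)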
| 330
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase_ ( _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: PreTrainedTokenizer , _lowerCamelCase: int , _lowerCamelCase: Optional[int] = None , ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if train_file is not None:
__SCREAMING_SNAKE_CASE : Any = [train_file]
if eval_file is not None:
__SCREAMING_SNAKE_CASE : Any = [eval_file]
if test_file is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = [test_file]
__SCREAMING_SNAKE_CASE : Optional[Any] = datasets.load_dataset("""csv""" , data_files=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
__SCREAMING_SNAKE_CASE : Dict = features_name.pop(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = list(set(ds[list(files.keys() )[0]][label_name] ) )
__SCREAMING_SNAKE_CASE : str = {label: i for i, label in enumerate(_lowerCamelCase )}
__SCREAMING_SNAKE_CASE : Any = tokenizer.model_input_names
__SCREAMING_SNAKE_CASE : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
__SCREAMING_SNAKE_CASE : int = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__SCREAMING_SNAKE_CASE : int = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__SCREAMING_SNAKE_CASE : str = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE : int = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__SCREAMING_SNAKE_CASE : Optional[int] = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE : str = labelaid[ex[label_name]]
yield (d, label)
__SCREAMING_SNAKE_CASE : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__SCREAMING_SNAKE_CASE : Dict = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__SCREAMING_SNAKE_CASE : Any = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
UpperCamelCase__ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
_A : int = field(metadata={'''help''': '''Which column contains the label'''} )
_A : str = field(default=lowerCamelCase__ , metadata={'''help''': '''The path of the training file'''} )
_A : Optional[str] = field(default=lowerCamelCase__ , metadata={'''help''': '''The path of the development file'''} )
_A : Optional[str] = field(default=lowerCamelCase__ , metadata={'''help''': '''The path of the test file'''} )
_A : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_A : bool = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
_A : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_A : bool = field(default=lowerCamelCase__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase: EvalPrediction ) -> Dict:
__SCREAMING_SNAKE_CASE : List[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__SCREAMING_SNAKE_CASE : List[Any] = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__SCREAMING_SNAKE_CASE : Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate()
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(_lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
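# Added sketch (not part of the original script): the trainer above expects CSV files whose header
# columns become features, with one column selected by --label_column_id. File name, script name,
# and CLI flags below are illustrative only.
import csv
with open("train.csv", "w", newline="") as handle:
    writer = csv.writer(handle)
    writer.writerow(["sentence", "label"])            # column index 1 is the label -> --label_column_id 1
    writer.writerow(["I loved this movie", "pos"])
    writer.writerow(["The plot made no sense", "neg"])
# A hypothetical invocation, assuming the script is saved as run_tf_text_classification.py:
#   python run_tf_text_classification.py --train_file train.csv --dev_file train.csv \
#       --label_column_id 1 --model_name_or_path bert-base-uncased \
#       --output_dir ./tf_clf_out --do_train --do_eval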
| 578
|
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of arr using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(n: int, arr: list):
        # c encodes the loop counters of the recursive formulation
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
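# Added cross-check (not part of the original file): Heap's algorithm must emit exactly the same set
# of permutations as itertools.permutations, only in a different order.
from itertools import permutations
sample = [1, 2, 3, 4]
assert sorted(heaps(list(sample))) == sorted(permutations(sample))
print(f"heaps() produced all {len(heaps(list(sample)))} permutations of {sample}")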
| 578
| 1
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : Dict , UpperCamelCase : pyspark.sql.DataFrame , UpperCamelCase : Optional[NamedSplit] = None , UpperCamelCase : Optional[Features] = None , UpperCamelCase : bool = True , UpperCamelCase : str = None , UpperCamelCase : bool = False , UpperCamelCase : str = None , UpperCamelCase : bool = True , UpperCamelCase : str = "arrow" , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(
split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , )
__UpperCAmelCase : Optional[int] = load_from_cache_file
__UpperCAmelCase : Dict = file_format
__UpperCAmelCase : Tuple = Spark(
df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__UpperCAmelCase : Tuple = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCamelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
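# Added usage sketch (not from the original module): the reader above is what backs Dataset.from_spark.
# This assumes a local pyspark session and a `datasets` release recent enough to ship Spark support.
from pyspark.sql import SparkSession
from datasets import Dataset
spark = SparkSession.builder.master("local[1]").appName("datasets-spark-sketch").getOrCreate()
spark_df = spark.createDataFrame([("hello",), ("world",)], ["text"])
hf_dataset = Dataset.from_spark(spark_df)  # materializes the DataFrame through the Spark builder
print(hf_dataset)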
| 721
|
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring shared by the two inputs, found with dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
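# Added usage sketch (not in the original file):
assert longest_common_substring("abcdef", "xabded") == "ab"
assert longest_common_substring("GeeksforGeeks", "GeeksQuiz") == "Geeks"
print(longest_common_substring("party", "artificial"))  # -> "art"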
| 299
| 0
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_SCREAMING_SNAKE_CASE =XLMProphetNetForConditionalGenerationOld.from_pretrained(_UpperCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =XLMProphetNetForConditionalGeneration.from_pretrained(
_UpperCamelCase , output_loading_info=_UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE =ProphetNetForConditionalGenerationOld.from_pretrained(_UpperCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =ProphetNetForConditionalGeneration.from_pretrained(
_UpperCamelCase , output_loading_info=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =['key_proj', 'value_proj', 'query_proj']
_SCREAMING_SNAKE_CASE ={
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_SCREAMING_SNAKE_CASE =key.split('.' )
if attributes[0] == "lm_head":
_SCREAMING_SNAKE_CASE =prophet
_SCREAMING_SNAKE_CASE =prophet_old
else:
_SCREAMING_SNAKE_CASE =prophet.prophetnet
_SCREAMING_SNAKE_CASE =prophet_old.model
_SCREAMING_SNAKE_CASE =False
for attribute in attributes:
if attribute in mapping:
_SCREAMING_SNAKE_CASE =mapping[attribute]
if not hasattr(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) > 0:
_SCREAMING_SNAKE_CASE =attribute
elif hasattr(_UpperCamelCase , _UpperCamelCase ):
_SCREAMING_SNAKE_CASE =attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE =old_model.weight
logger.info(f"{attribute} is initialized." )
_SCREAMING_SNAKE_CASE =True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_SCREAMING_SNAKE_CASE =old_model.bias
logger.info(f"{attribute} is initialized" )
_SCREAMING_SNAKE_CASE =True
break
elif attribute in special_keys and hasattr(_UpperCamelCase , 'in_proj_weight' ):
_SCREAMING_SNAKE_CASE =old_model.in_proj_weight.shape[0] // 3
_SCREAMING_SNAKE_CASE =getattr(_UpperCamelCase , _UpperCamelCase )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_SCREAMING_SNAKE_CASE =True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_SCREAMING_SNAKE_CASE =nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_SCREAMING_SNAKE_CASE =True
break
if attribute.isdigit():
_SCREAMING_SNAKE_CASE =model[int(_UpperCamelCase )]
_SCREAMING_SNAKE_CASE =old_model[int(_UpperCamelCase )]
else:
_SCREAMING_SNAKE_CASE =getattr(_UpperCamelCase , _UpperCamelCase )
if old_attribute == "":
_SCREAMING_SNAKE_CASE =old_model
else:
if not hasattr(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(f"{old_model} does not have {old_attribute}" )
_SCREAMING_SNAKE_CASE =getattr(_UpperCamelCase , _UpperCamelCase )
if not is_key_init:
raise ValueError(f"{key} was not correctly initialized!" )
print(f"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 405
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase : str = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : str=None , _UpperCamelCase : int=None , _UpperCamelCase : str=None , _UpperCamelCase : Union[str, Any]=None , ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
_SCREAMING_SNAKE_CASE =np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_SCREAMING_SNAKE_CASE =np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_SCREAMING_SNAKE_CASE =np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_SCREAMING_SNAKE_CASE =np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class A__ :
def __init__( self : Optional[Any] , _a : Union[str, Any] , _a : List[Any]=13 , _a : int=7 , _a : List[Any]=True , _a : Optional[Any]=False , _a : Tuple=99 , _a : List[str]=16 , _a : Any=2 , _a : Tuple=4 , _a : List[str]=4 , _a : Optional[int]="gelu" , _a : List[str]=0.1 , _a : Optional[Any]=0.1 , _a : int=32 , _a : int=2 , _a : str=1 , _a : Dict=0 , _a : Tuple=0.02 , ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =eos_token_id
_SCREAMING_SNAKE_CASE =pad_token_id
_SCREAMING_SNAKE_CASE =bos_token_id
_SCREAMING_SNAKE_CASE =initializer_range
def A ( self : int ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_SCREAMING_SNAKE_CASE =np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_SCREAMING_SNAKE_CASE =shift_tokens_right(_a , 1 , 2 )
_SCREAMING_SNAKE_CASE =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_a , )
_SCREAMING_SNAKE_CASE =prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def A ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
return config, inputs_dict
def A ( self : Dict , _a : Dict , _a : Union[str, Any] , _a : List[str] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =20
_SCREAMING_SNAKE_CASE =model_class_name(_a )
_SCREAMING_SNAKE_CASE =model.encode(inputs_dict['input_ids'] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_SCREAMING_SNAKE_CASE =model.init_cache(decoder_input_ids.shape[0] , _a , _a )
_SCREAMING_SNAKE_CASE =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_SCREAMING_SNAKE_CASE =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE =model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
_SCREAMING_SNAKE_CASE =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_SCREAMING_SNAKE_CASE =model.decode(
decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , )
_SCREAMING_SNAKE_CASE =model.decode(_a , _a )
_SCREAMING_SNAKE_CASE =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
def A ( self : Optional[int] , _a : List[str] , _a : Optional[Any] , _a : str ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =20
_SCREAMING_SNAKE_CASE =model_class_name(_a )
_SCREAMING_SNAKE_CASE =model.encode(inputs_dict['input_ids'] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_SCREAMING_SNAKE_CASE =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE =model.init_cache(decoder_input_ids.shape[0] , _a , _a )
_SCREAMING_SNAKE_CASE =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE =model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
_SCREAMING_SNAKE_CASE =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_SCREAMING_SNAKE_CASE =model.decode(
decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , )
_SCREAMING_SNAKE_CASE =model.decode(_a , _a , decoder_attention_mask=_a )
_SCREAMING_SNAKE_CASE =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( unittest.TestCase ):
A__ = 99
def A ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_SCREAMING_SNAKE_CASE =input_ids.shape[0]
_SCREAMING_SNAKE_CASE =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def A ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_config_and_data()
_SCREAMING_SNAKE_CASE =FlaxBlenderbotSmallForConditionalGeneration(_a )
_SCREAMING_SNAKE_CASE =lm_model(input_ids=_a )
_SCREAMING_SNAKE_CASE =(batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _a )
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_SCREAMING_SNAKE_CASE =FlaxBlenderbotSmallForConditionalGeneration(_a )
_SCREAMING_SNAKE_CASE =np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE =np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_SCREAMING_SNAKE_CASE =lm_model(input_ids=_a , decoder_input_ids=_a )
_SCREAMING_SNAKE_CASE =(*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _a )
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class A__ ( A__ , unittest.TestCase , A__ ):
A__ = True
A__ = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A__ = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def A ( self : Tuple ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =FlaxBlenderbotSmallModelTester(self )
def A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_a , _a , _a )
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a )
def A ( self : List[str] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE =self._prepare_for_class(_a , _a )
_SCREAMING_SNAKE_CASE =model_class(_a )
@jax.jit
def encode_jitted(_a : Optional[Any] , _a : Optional[Any]=None , **_a : Any ):
return model.encode(input_ids=_a , attention_mask=_a )
with self.subTest('JIT Enabled' ):
_SCREAMING_SNAKE_CASE =encode_jitted(**_a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE =encode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( self : str ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_SCREAMING_SNAKE_CASE ={
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_a : Tuple , _a : Any , _a : Optional[Any] ):
return model.decode(
decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , )
with self.subTest('JIT Enabled' ):
_SCREAMING_SNAKE_CASE =decode_jitted(**_a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE =decode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self : Any ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_SCREAMING_SNAKE_CASE =np.ones((1, 1) ) * model.config.eos_token_id
_SCREAMING_SNAKE_CASE =model(_a )
self.assertIsNotNone(_a )
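# A condensed, self-contained sketch of the encode-once / decode-incrementally pattern that the
# cache test at the top of this file exercises. The tiny config mirrors the tester above; the
# greedy loop is illustrative only and is not the model's official generate() API.
import jax.numpy as jnp
import numpy as np
from transformers import BlenderbotSmallConfig, FlaxBlenderbotSmallForConditionalGeneration

toy_config = BlenderbotSmallConfig(
    vocab_size=99, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
    decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32,
    max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
)
toy_model = FlaxBlenderbotSmallForConditionalGeneration(toy_config)
source_ids = np.array([[71, 82, 18, 33, 46, 91, 2]], dtype=np.int64)
enc_out = toy_model.encode(input_ids=source_ids)

max_len = 5
dec_ids = jnp.array([[toy_config.bos_token_id]], dtype="i4")
dec_mask = jnp.ones((1, max_len), dtype="i4")  # full-length mask, as in the cache test above
cache = toy_model.init_cache(1, max_len, enc_out)
for step in range(max_len - 1):
    step_out = toy_model.decode(
        dec_ids[:, -1:],
        enc_out,
        decoder_attention_mask=dec_mask,
        past_key_values=cache,
        decoder_position_ids=jnp.array([[step]], dtype="i4"),
    )
    cache = step_out.past_key_values
    next_id = jnp.argmax(step_out.logits[:, -1, :], axis=-1)[:, None].astype("i4")
    dec_ids = jnp.concatenate([dec_ids, next_id], axis=-1)
print(dec_ids)  # untrained weights, so the generated ids themselves are meaningless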
| 405
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : int ):
snake_case__ : Dict = tempfile.mkdtemp()
# fmt: off
snake_case__ : Optional[int] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : Tuple = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
snake_case__ : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case__ : List[str] = {"""unk_token""": """<unk>"""}
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowercase ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
def _lowercase ( self : List[Any] , **__A : List[str] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def _lowercase ( self : List[Any] , **__A : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def _lowercase ( self : Union[str, Any] , **__A : int ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def _lowercase ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Any ):
snake_case__ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : Optional[int] = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Any ):
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Union[str, Any] = self.get_rust_tokenizer()
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Dict = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
snake_case__ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : int = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowercase )
self.assertIsInstance(processor_fast.tokenizer , _lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowercase )
self.assertIsInstance(processor_fast.image_processor , _lowercase )
def _lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case__ : List[str] = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
snake_case__ : int = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def _lowercase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Dict = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : str = self.prepare_image_inputs()
snake_case__ : Union[str, Any] = image_processor(_lowercase , return_tensors="np" )
snake_case__ : Any = processor(images=_lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : int = """lower newer"""
snake_case__ : List[str] = processor(text=_lowercase )
snake_case__ : Optional[int] = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : Any ):
snake_case__ : int = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Union[str, Any] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : str = """lower newer"""
snake_case__ : Tuple = self.prepare_image_inputs()
snake_case__ : Dict = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _lowercase ( self : List[str] ):
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : str = processor.batch_decode(_lowercase )
snake_case__ : Any = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _lowercase ( self : str ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : List[Any] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : Union[str, Any] = """lower newer"""
snake_case__ : Optional[Any] = self.prepare_image_inputs()
snake_case__ : Optional[Any] = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
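# A minimal end-to-end usage sketch of the tokenizer + image-processor pairing tested above.
# The checkpoint name and example text are illustrative; running this needs the
# openai/clip-vit-base-patch32 files from the Hugging Face Hub (or any other CLIP checkpoint).
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
example_image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
encoded = clip_processor(text=["lower newer"], images=example_image, return_tensors="np", padding=True)
print(sorted(encoded.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']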
| 721
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
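# A short usage sketch of the public API assembled above; the dataset name is illustrative and
# loading it requires network access to the Hugging Face Hub.
from datasets import load_dataset

train_split = load_dataset("rotten_tomatoes", split="train")
print(train_split.features)  # column names and types
print(train_split[0])        # first example as a plain dict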
| 25
| 0
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
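# Illustrative sanity check: both functions above are truncated Maclaurin expansions,
# sin(x) = sum_{r>=0} (-1)^r x^(2r+1) / (2r+1)! and cos(x) = sum_{r>=0} (-1)^r x^(2r) / (2r)!,
# so with 30 terms they should agree with math.sin / math.cos to high precision.
from math import cos, sin

for x in (0.5, 2.0, -3.0):
    assert abs(maclaurin_sin(x) - sin(x)) < 1e-9
    assert abs(maclaurin_cos(x) - cos(x)) < 1e-9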
| 152
|
'''simple docstring'''
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''', num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''', num_inversions_bf)
if __name__ == "__main__":
main()
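# Worked example (illustrative) of the merge-count step in _count_cross_inversions: with the
# sorted halves P = [2, 5] and Q = [1, 3], comparing 2 > 1 counts len(P) - 0 = 2 cross
# inversions at once, namely (2, 1) and (5, 1); later 5 > 3 adds the pair (5, 3).
if __name__ == "__main__":
    assert _count_cross_inversions([2, 5], [1, 3]) == ([1, 2, 3, 5], 3)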
| 152
| 1
|
'''simple docstring'''
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = F'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
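# Worked check (illustrative): the recurrence above, current *= 4 * i - 2 followed by
# current //= i + 1, reproduces the Catalan numbers 1, 1, 2, 5, 14, 42, ...
if __name__ == "__main__":
    assert [catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]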
| 701
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase : Dict = logging.getLogger(__name__)
class __lowercase :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
A : int = False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if not self.initialized:
A : str = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
A : Tuple = True
def snake_case ( self ) -> int:
self.retriever.index.init_index()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
A , A : str = self.retriever._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
if index is not None and index.is_initialized() and len(__UpperCAmelCase ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
A : Optional[int] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for worker in self.retrieval_workers
] )
def snake_case ( self ) -> Union[str, Any]:
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A : List[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
A , A : Union[str, Any] = ray.get(random_worker.retrieve.remote(__UpperCAmelCase , __UpperCAmelCase ) )
else:
A , A : Any = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase )
@classmethod
def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Any:
return super(__UpperCAmelCase , cls ).get_tokenizers(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
@classmethod
def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict:
A : int = kwargs.pop('''config''' , __UpperCAmelCase ) or RagConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
A : Tuple = RagTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
A : Any = rag_tokenizer.question_encoder
A : int = rag_tokenizer.generator
if indexed_dataset is not None:
A : Optional[int] = '''custom'''
A : Tuple = CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase )
else:
A : Union[str, Any] = cls._build_index(__UpperCAmelCase )
return cls(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , retrieval_workers=__UpperCAmelCase , index=__UpperCAmelCase , )
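# A minimal sketch of the Ray actor pattern the distributed retriever above relies on. The
# worker class and its payload are illustrative stand-ins, not the real retrieval worker API:
# actors are created once with .remote(), and each retrieve call is dispatched to a randomly
# chosen worker via ray.get(worker.method.remote(...)).
import random

import ray


@ray.remote
class EchoRetrievalWorker:
    def retrieve(self, question: str) -> str:
        return f"documents for: {question}"


if __name__ == "__main__":
    ray.init(num_cpus=2)
    workers = [EchoRetrievalWorker.remote() for _ in range(2)]
    chosen = workers[random.randint(0, len(workers) - 1)]
    print(ray.get(chosen.retrieve.remote("what is a transformer?")))
    ray.shutdown()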
| 423
| 0
|
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
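# For this adjacency list the call above should print 2, 3 and 5: removing vertex 2 cuts the
# {0, 1} triangle side off from the rest of the graph, removing 3 isolates vertex 4, and
# removing 5 disconnects the {6, 7, 8} cycle from the remainder.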
| 27
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__(self : Dict , snake_case : Dict , snake_case : List[Any]=13 , snake_case : int=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Any=2 , snake_case : Optional[Any]=4 , snake_case : int=37 , snake_case : Dict="gelu" , snake_case : Dict=0.1 , snake_case : Dict=0.1 , snake_case : Optional[int]=512 , snake_case : Dict=16 , snake_case : Union[str, Any]=2 , snake_case : Dict=0.02 , snake_case : str=3 , snake_case : str=4 , snake_case : Optional[Any]=None , snake_case : str=0 , ) -> Optional[Any]:
_lowercase : int = parent
_lowercase : Tuple = batch_size
_lowercase : str = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Tuple = use_token_type_ids
_lowercase : List[Any] = use_labels
_lowercase : List[str] = vocab_size
_lowercase : Dict = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Dict = intermediate_size
_lowercase : List[Any] = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Dict = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : Tuple = type_vocab_size
_lowercase : List[str] = type_sequence_label_size
_lowercase : str = initializer_range
_lowercase : Tuple = num_labels
_lowercase : List[Any] = num_choices
_lowercase : Dict = scope
_lowercase : Optional[Any] = projection_dim
def _a(self : Tuple ) -> List[str]:
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowercase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : List[Any] = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Union[str, Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
_lowercase : Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a(self : Union[str, Any] , snake_case : Any , snake_case : Tuple , snake_case : str , snake_case : str , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : str ) -> List[str]:
_lowercase : List[Any] = TFDPRContextEncoder(config=snake_case )
_lowercase : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
_lowercase : Optional[int] = model(snake_case , token_type_ids=snake_case )
_lowercase : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a(self : List[Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Any , snake_case : int , snake_case : Optional[int] , snake_case : Any , snake_case : Union[str, Any] ) -> int:
_lowercase : Optional[Any] = TFDPRQuestionEncoder(config=snake_case )
_lowercase : int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
_lowercase : str = model(snake_case , token_type_ids=snake_case )
_lowercase : str = model(snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _a(self : Union[str, Any] , snake_case : Any , snake_case : int , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Any:
_lowercase : Any = TFDPRReader(config=snake_case )
_lowercase : Optional[Any] = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _a(self : int ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
_A = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_A = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
_A = False
_A = False
_A = False
_A = False
_A = False
def _a(self : str ) -> List[str]:
_lowercase : List[Any] = TFDPRModelTester(self )
_lowercase : Any = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _a(self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a(self : Any ) -> Tuple:
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*snake_case )
def _a(self : Dict ) -> Optional[Any]:
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*snake_case )
def _a(self : List[Any] ) -> Any:
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*snake_case )
@slow
def _a(self : int ) -> str:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = TFDPRContextEncoder.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[str] = TFDPRContextEncoder.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : int = TFDPRQuestionEncoder.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Any = TFDPRReader.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
    def test_inference_no_head(self) -> None:
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 461
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = """MCTCTFeatureExtractor"""
_lowercase : str = """AutoTokenizer"""
    def __init__(self, feature_extractor, tokenizer) -> None:
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __call__( self , *lowercase__ , **lowercase__ ) -> Optional[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowercase__ , **lowercase__ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_snake_case : List[str] = kwargs.pop('''raw_speech''' )
else:
_snake_case : str = kwargs.pop('''audio''' , lowercase__ )
_snake_case : Dict = kwargs.pop('''sampling_rate''' , lowercase__ )
_snake_case : Optional[int] = kwargs.pop('''text''' , lowercase__ )
if len(lowercase__ ) > 0:
_snake_case : Any = args[0]
_snake_case : List[str] = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_snake_case : Tuple = self.feature_extractor(lowercase__ , *lowercase__ , sampling_rate=lowercase__ , **lowercase__ )
if text is not None:
_snake_case : str = self.tokenizer(lowercase__ , **lowercase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_snake_case : Dict = encodings['''input_ids''']
return inputs
def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> Optional[int]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowercase__ , **lowercase__ )
_snake_case : int = kwargs.pop('''input_features''' , lowercase__ )
_snake_case : Tuple = kwargs.pop('''labels''' , lowercase__ )
if len(lowercase__ ) > 0:
_snake_case : Union[str, Any] = args[0]
_snake_case : List[Any] = args[1:]
if input_features is not None:
_snake_case : Union[str, Any] = self.feature_extractor.pad(lowercase__ , *lowercase__ , **lowercase__ )
if labels is not None:
_snake_case : Union[str, Any] = self.tokenizer.pad(lowercase__ , **lowercase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_snake_case : List[Any] = labels['''input_ids''']
return input_features
def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@contextmanager
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 717
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class lowerCamelCase (a__ ):
_lowercase : Optional[int] = VOCAB_FILES_NAMES
_lowercase : str = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
"""simple docstring"""
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
_snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
_snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
_snake_case : int = add_prefix_space
_snake_case : Optional[Any] = pre_tok_class(**lowercase__ )
_snake_case : List[str] = add_prefix_space
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
"""simple docstring"""
_snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
"""simple docstring"""
_snake_case : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
_snake_case : Dict = input_ids[-self.model_max_length :]
return input_ids
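# A plain-Python illustration (hypothetical token ids; eos_token_id assumed to be 0) of the
# truncation rule implemented above: every conversation turn is encoded, an EOS id is appended
# after each turn, and only the most recent model_max_length ids are kept.
model_max_length = 8
eos_token_id = 0
turn_ids = [[11, 12, 13], [21, 22], [31, 32, 33, 34]]
input_ids = []
for ids in turn_ids:
    input_ids.extend(ids + [eos_token_id])
if len(input_ids) > model_max_length:
    input_ids = input_ids[-model_max_length:]
print(input_ids)  # [21, 22, 0, 31, 32, 33, 34, 0]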
| 47
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 131
|
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = ProphetNetTokenizer
__UpperCAmelCase : Dict = False
def __lowerCAmelCase ( self ) -> int:
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , snake_case_ ) -> Dict:
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __lowerCAmelCase ( self ) -> str:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __lowerCAmelCase ( self ) -> Dict:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __lowerCAmelCase ( self ) -> List[str]:
_a = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Any:
_a = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_a = {}
for i, token in enumerate(snake_case_ ):
_a = i
_a = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_a = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
_a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __lowerCAmelCase ( self ) -> str:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __lowerCAmelCase ( self ) -> int:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def __lowerCAmelCase ( self ) -> str:
_a = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_a = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
_a = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ )
_a = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 131
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build an iterative segment tree over arr using the combiner fnc."""
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set element p to v and refresh its ancestors in O(log N)."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the values in the inclusive range [l, r] in O(log N)."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every possible range query against a plain reduce over the array."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 15
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_snake_case = 'swin'
_snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , _UpperCamelCase : List[str]=224 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : Dict=96 , _UpperCamelCase : Any=[2, 2, 6, 2] , _UpperCamelCase : Any=[3, 6, 12, 24] , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Tuple=4.0 , _UpperCamelCase : Dict=True , _UpperCamelCase : Tuple=0.0 , _UpperCamelCase : Any=0.0 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : Any="gelu" , _UpperCamelCase : str=False , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Dict=1E-5 , _UpperCamelCase : List[str]=32 , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : List[Any] , ) ->Tuple:
"""simple docstring"""
super().__init__(**_UpperCamelCase)
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : Tuple = patch_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Union[str, Any] = embed_dim
_lowerCamelCase : str = depths
_lowerCamelCase : str = len(_UpperCamelCase)
_lowerCamelCase : Optional[Any] = num_heads
_lowerCamelCase : Tuple = window_size
_lowerCamelCase : int = mlp_ratio
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : Tuple = drop_path_rate
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = use_absolute_embeddings
_lowerCamelCase : int = layer_norm_eps
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCamelCase : int = int(embed_dim * 2 ** (len(_UpperCamelCase) - 1))
_lowerCamelCase : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_UpperCamelCase) + 1)]
_lowerCamelCase , _lowerCamelCase : List[str] = get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names)
class __snake_case ( __lowerCAmelCase ):
'''simple docstring'''
_snake_case = version.parse('1.11' )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
"""simple docstring"""
return 1E-4
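# A small sketch (assuming this configuration corresponds to transformers' SwinConfig) of the
# derived attribute set in __init__ above: hidden_size is the channel width after the last
# stage, embed_dim * 2 ** (num_stages - 1).
from transformers import SwinConfig

swin_config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
assert swin_config.hidden_size == 96 * 2 ** (len(swin_config.depths) - 1)  # 768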
| 15
| 1
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
lowerCamelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
lowerCamelCase__ = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class snake_case__ ( lowercase_ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : str = CamembertTokenizer
lowerCamelCase : int = CamembertTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : Any = True
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case :List[Any] = CamembertTokenizer(a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Tuple = """<pad>"""
__snake_case :Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(a__ ) , 10_04 )
def __lowercase ( self ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :List[Any] = CamembertTokenizer(a__ )
tokenizer.save_pretrained(self.tmpdirname )
__snake_case :Union[str, Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__snake_case :Any = """I was born in 92000, and this is falsé."""
__snake_case :Optional[int] = tokenizer.encode(a__ )
__snake_case :List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
__snake_case :Tuple = tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case :str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__snake_case :Optional[Any] = tokenizer.convert_ids_to_tokens(a__ )
__snake_case :Optional[int] = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
def __lowercase ( self ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case :Optional[Any] = self.get_tokenizer()
__snake_case :Optional[int] = self.get_rust_tokenizer()
__snake_case :Union[str, Any] = """I was born in 92000, and this is falsé."""
__snake_case :Dict = tokenizer.tokenize(a__ )
__snake_case :int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case :Tuple = tokenizer.encode(a__ , add_special_tokens=a__ )
__snake_case :List[str] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
__snake_case :Optional[Any] = self.get_rust_tokenizer()
__snake_case :Tuple = tokenizer.encode(a__ )
__snake_case :Tuple = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :Any = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__snake_case :str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=a__ , )
| 455
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    # NOTE: the timing labels below are reconstructed descriptive names; only the calls themselves
    # come from the original benchmark.
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 455
| 1
|
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    # Curved surface (2*pi*r^2) plus the flat circular base (pi*r^2).
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)) with s the semi-perimeter.
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \nequal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \nlength of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("""\nSurface Areas of various geometric shapes: \n""")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
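    # Reference values for a few of the calls above (added; easy to verify by hand):
    #   area_rectangle(10, 20) == 200, area_square(10) == 100,
    #   area_triangle_three_sides(5, 12, 13) == 30.0 (5-12-13 is a right triangle),
    #   surface_area_cylinder(10, 20) == 2 * pi * 10 * (20 + 10) ≈ 1884.96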
| 710
|
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
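# Illustrative usage (not part of the original module): check a single pinned dependency,
# optionally with a custom hint shown when the check fails.
#   dep_version_check("tokenizers")
#   dep_version_check("numpy", hint="numpy is required for array operations")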
| 427
| 0
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'b0': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1_408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1_536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1_792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2_048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2_304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2_560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name: str) -> EfficientNetConfig:
    """simple docstring"""
    # Config attribute names follow EfficientNetConfig; the values come from CONFIG_MAP above.
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
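# For example (values taken from CONFIG_MAP above), get_efficientnet_config("b0") returns a config
# with hidden_dim=1280, image_size=224, dropout_rate=0.2 and the 1000 ImageNet labels attached.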
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """simple docstring"""
    # Map each TF block name ("block1a", "block2b", ...) to a zero-based HF block index.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
lowercase : List[Any] =block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """simple docstring"""
    # Load the original TF/Keras model with ImageNet weights
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
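# Example invocation (illustrative; the actual script filename depends on where this file is saved):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model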
| 94
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Saves a randomly initialized (untrained) seq2seq model built from `config_name` to `save_dir`."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
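# Example CLI usage (illustrative; positional args map to `config_name` and `save_dir`,
# extra --flags are forwarded to AutoConfig as keyword overrides):
#   python save_randomly_initialized_version.py t5-small /tmp/t5-random-init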
| 426
| 0
|
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_UpperCAmelCase : Tuple = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_UpperCAmelCase : List[str] = """main"""
# Default branch name
_UpperCAmelCase : str = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
_UpperCAmelCase : Any = """aaaaaaa"""
# This commit does not exist, so we should 404.
_UpperCAmelCase : Union[str, Any] = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
_UpperCAmelCase : Tuple = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class a__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def _snake_case (self , __lowercase ):
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def _snake_case (self , __lowercase ):
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def _snake_case (self , __lowercase ):
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels works through inheritance, regardless of the subclass name
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 474
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
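# Note (added): the CSS classes "gs_ri" and "gs_fl" mirror Google Scholar's markup at the time this
# script was written; if the page layout changes or requests are rate limited, get_citation() will fail.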
| 474
| 1
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCamelCase ( snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> Dict:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any]=True ) -> Tuple:
model.train()
UpperCamelCase : Union[str, Any] = model(snake_case__ )
UpperCamelCase : Optional[Any] = F.mse_loss(snake_case__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Any=False ) -> List[str]:
set_seed(42 )
UpperCamelCase : Tuple = RegressionModel()
UpperCamelCase : Dict = deepcopy(snake_case__ )
UpperCamelCase : Tuple = RegressionDataset(length=80 )
UpperCamelCase : Dict = DataLoader(snake_case__ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCamelCase : str = AdamW(params=model.parameters() , lr=1E-3 )
UpperCamelCase : Dict = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCamelCase : Optional[int] = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
UpperCamelCase : int = LambdaLR(snake_case__ , lr_lambda=lambda snake_case__ : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = accelerator.prepare(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(snake_case__ , snake_case__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def UpperCamelCase ( snake_case__ : str ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = get_training_setup(snake_case__ )
# Use a single batch
UpperCamelCase , UpperCamelCase : Any = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : Tuple = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCamelCase : str = ddp_input[torch.randperm(len(snake_case__ ) )]
def UpperCamelCase ( snake_case__ : int ) -> Tuple:
# Test on distributed setup that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = get_training_setup(snake_case__ )
# Use a single batch
UpperCamelCase , UpperCamelCase : Dict = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : int = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCamelCase : str = ddp_input[torch.randperm(len(snake_case__ ) )]
def UpperCamelCase ( snake_case__ : Optional[int]=False , snake_case__ : Dict=False ) -> Tuple:
UpperCamelCase : Tuple = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
UpperCamelCase , UpperCamelCase : Optional[int] = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
UpperCamelCase : Any = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def UpperCamelCase ( snake_case__ : Optional[int]=False , snake_case__ : List[Any]=False ) -> Union[str, Any]:
UpperCamelCase : List[Any] = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
UpperCamelCase , UpperCamelCase : List[str] = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCamelCase : int = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def UpperCamelCase ( ) -> Optional[int]:
UpperCamelCase : Any = Accelerator()
UpperCamelCase : int = RegressionDataset(length=80 )
UpperCamelCase : Dict = DataLoader(snake_case__ , batch_size=16 )
UpperCamelCase : Optional[Any] = RegressionDataset(length=96 )
UpperCamelCase : List[str] = DataLoader(snake_case__ , batch_size=16 )
UpperCamelCase , UpperCamelCase : List[Any] = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCamelCase ( ) -> int:
UpperCamelCase : str = Accelerator()
UpperCamelCase : List[Any] = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[Any] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 40
|
def decimal_to_binary(num: int) -> str:
    """Converts an integer to its binary string representation, e.g. 5 -> "0b101"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = 'cvt'

    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1e-12 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 710
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase : List[str] = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
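# Note (added): thanks to the lazy module above, `from transformers.models.pix2struct import Pix2StructConfig`
# only imports the corresponding submodule on first attribute access, which keeps `import transformers` fast.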
| 343
| 0
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=14 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.0_2 , ) -> Dict:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = rotary_dim
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = None
UpperCamelCase_ = vocab_size - 1
UpperCamelCase_ = vocab_size - 1
UpperCamelCase_ = vocab_size - 1
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = 20
UpperCamelCase_ = model_class_name(_UpperCAmelCase )
UpperCamelCase_ = model.init_cache(input_ids.shape[0] , _UpperCAmelCase )
UpperCamelCase_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCamelCase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase_ = model(
input_ids[:, :-1] , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , position_ids=_UpperCAmelCase , )
UpperCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase_ = model(
input_ids[:, -1:] , attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=_UpperCAmelCase , )
UpperCamelCase_ = model(_UpperCAmelCase )
UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
UpperCamelCase_ = 20
UpperCamelCase_ = model_class_name(_UpperCAmelCase )
UpperCamelCase_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
UpperCamelCase_ = model.init_cache(input_ids.shape[0] , _UpperCAmelCase )
UpperCamelCase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase_ = model(
input_ids[:, :-1] , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , position_ids=_UpperCAmelCase , )
UpperCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_UpperCAmelCase , position_ids=_UpperCAmelCase , )
UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = FlaxGPTJModelTester(self )
def _UpperCAmelCase ( self ) -> Any:
for model_class_name in self.all_model_classes:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@tooslow
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
UpperCamelCase_ = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )
UpperCamelCase_ = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
UpperCamelCase_ = False
UpperCamelCase_ = model.config.eos_token_id
UpperCamelCase_ = jax.jit(model.generate )
UpperCamelCase_ = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@is_pt_flax_cross_test
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = pt_inputs['input_ids'].shape
UpperCamelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = pt_model_class(_UpperCAmelCase ).eval()
UpperCamelCase_ = model_class(_UpperCAmelCase , dtype=jnp.floataa )
UpperCamelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _UpperCAmelCase )
UpperCamelCase_ = fx_state
with torch.no_grad():
UpperCamelCase_ = pt_model(**_UpperCAmelCase ).to_tuple()
UpperCamelCase_ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = model_class.from_pretrained(_UpperCAmelCase , from_pt=_UpperCAmelCase )
UpperCamelCase_ = fx_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(
len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCamelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCamelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase_ = getattr(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase_ = pt_model_class(_UpperCAmelCase ).eval()
UpperCamelCase_ = model_class(_UpperCAmelCase , dtype=jnp.floataa )
UpperCamelCase_ = load_flax_weights_in_pytorch_model(_UpperCAmelCase , fx_model.params )
UpperCamelCase_ , UpperCamelCase_ = pt_inputs['input_ids'].shape
UpperCamelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_UpperCAmelCase ):
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 0
UpperCamelCase_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCamelCase_ = pt_model(**_UpperCAmelCase ).to_tuple()
UpperCamelCase_ = fx_model(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCAmelCase )
UpperCamelCase_ = pt_model_class.from_pretrained(_UpperCAmelCase , from_flax=_UpperCAmelCase )
with torch.no_grad():
UpperCamelCase_ = pt_model_loaded(**_UpperCAmelCase ).to_tuple()
self.assertEqual(
len(_UpperCAmelCase ) , len(_UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def _UpperCAmelCase ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
UpperCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 23
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 41
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
lowerCAmelCase__ = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
lowerCAmelCase__ = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
lowerCAmelCase__ = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
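        # METEOR always needs the WordNet corpus; newer NLTK releases additionally need
        # the punkt tokenizer (>= 3.6.5) and the omw-1.4 wordnet data (>= 3.6.6).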
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def UpperCamelCase ( self , lowercase , lowercase , lowercase=0.9 , lowercase=3 , lowercase=0.5 ) -> Dict:
'''simple docstring'''
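        # From NLTK 3.6.5 on, single_meteor_score expects pre-tokenized input, so both the
        # reference and the prediction are word-tokenized first; older NLTK accepts raw strings.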
if NLTK_VERSION >= version.Version("3.6.5" ):
A__ = [
meteor_score.single_meteor_score(
word_tokenize(lowercase ) , word_tokenize(lowercase ) , alpha=lowercase , beta=lowercase , gamma=lowercase )
for ref, pred in zip(lowercase , lowercase )
]
else:
A__ = [
meteor_score.single_meteor_score(lowercase , lowercase , alpha=lowercase , beta=lowercase , gamma=lowercase )
for ref, pred in zip(lowercase , lowercase )
]
return {"meteor": np.mean(lowercase )}
| 717
|
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[int] , SCREAMING_SNAKE_CASE_: list[list[str]] , SCREAMING_SNAKE_CASE_: int , ) -> None:
'''simple docstring'''
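    # Place one queen per row: possible_board[i] holds the column of the queen in row i,
    # while the two collision lists track the occupied "/" (row - col) and "\" (row + col) diagonals.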
A__ = len(SCREAMING_SNAKE_CASE_ )
    # If row equals the size of the board, every row of the current board
    # (possible_board) already holds a queen, so we have a complete solution
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(SCREAMING_SNAKE_CASE_ ):
        # First we check that the current column does not already appear in the board
        # (possible_board): a repeated value would mean two queens share a column, i.e.
        # a vertical collision. Then we apply the two diagonal formulas seen before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
        # and verify that the results of these two formulas are not already recorded in
        # their respective collision sets (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is true there is a collision, so we skip to the
        # next column in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise the placement is valid: recurse with the queen added and the collision sets updated
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> None:
'''simple docstring'''
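    # Run the backtracking search for an n x n board, print every solution board,
    # and finally report the number of solutions found.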
A__ = []
depth_first_search([] , [] , [] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Print all the boards
for board in boards:
for column in board:
print(SCREAMING_SNAKE_CASE_ )
print("" )
print(len(SCREAMING_SNAKE_CASE_ ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 626
| 0
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
        F' image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
    _lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44
|
from __future__ import annotations
from math import pow, sqrt
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCAmelCase__ , 2 ) - pow(UpperCAmelCase__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCAmelCase__ , 2 ) + pow(UpperCAmelCase__ , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
UpperCamelCase_ = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
UpperCamelCase_ = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
UpperCamelCase_ = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def lowerCAmelCase__ ( a_ : Tuple , a_ : Dict , a_ : Tuple ) -> List[str]:
UpperCAmelCase__ : Union[str, Any] = hf_hub_url(repo_id=a_ , path=a_ , revision=a_ )
assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a_ )}"""
| 599
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase__ : Optional[Any] = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
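        # the fused qkv weight has shape [3 * hidden_size, hidden_size]:
        # query rows first, then key rows, then value rows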
lowercase__ : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase__ : Union[str, Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase__ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[Any] = dct.pop(lowerCamelCase__ )
lowercase__ : int = val
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
lowercase__ : Optional[Any] = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase__ : Tuple = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
lowercase__ : str = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("RGB" )
return im
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[Any] = ViTConfig(image_size=384 , qkv_bias=lowerCamelCase__ )
lowercase__ : Tuple = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase__ : List[Any] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase__ : Any = 1_024
lowercase__ : Optional[int] = 4_096
lowercase__ : Optional[int] = 24
lowercase__ : List[Any] = 16
lowercase__ : Optional[int] = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase__ : Dict = False
lowercase__ : Optional[int] = "relu"
lowercase__ : Optional[Any] = 1_024
lowercase__ : int = True
lowercase__ : List[str] = False
lowercase__ : int = False
# load HuggingFace model
lowercase__ : List[str] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
lowercase__ : Optional[int] = TrOCRForCausalLM(lowerCamelCase__ )
lowercase__ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase__ : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" , check_hash=lowerCamelCase__ )["model"]
lowercase__ : List[str] = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase__ : int = state_dict.pop(lowerCamelCase__ )
if key.startswith("decoder" ) and "output_projection" not in key:
lowercase__ : Optional[int] = val
else:
lowercase__ : str = val
# load state dict
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
lowercase__ : Union[str, Any] = ViTImageProcessor(size=encoder_config.image_size )
lowercase__ : List[Any] = RobertaTokenizer.from_pretrained("roberta-large" )
lowercase__ : int = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : int = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="pt" ).pixel_values
# verify logits
lowercase__ : List[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase__ : Any = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
lowercase__ : Tuple = outputs.logits
lowercase__ : Dict = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase__ : Optional[int] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase__ : Optional[Any] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase__ : int = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase__ : Dict = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 496
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowerCAmelCase__ = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = EfficientNetConfig()
lowercase__ : str = CONFIG_MAP[model_name]["hidden_dim"]
lowercase__ : Union[str, Any] = CONFIG_MAP[model_name]["width_coef"]
lowercase__ : List[Any] = CONFIG_MAP[model_name]["depth_coef"]
lowercase__ : Optional[int] = CONFIG_MAP[model_name]["image_size"]
lowercase__ : Tuple = CONFIG_MAP[model_name]["dropout_rate"]
lowercase__ : Dict = CONFIG_MAP[model_name]["dw_padding"]
lowercase__ : str = "huggingface/label-files"
lowercase__ : List[Any] = "imagenet-1k-id2label.json"
lowercase__ : Any = 1_000
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : Tuple = idalabel
lowercase__ : Dict = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = CONFIG_MAP[model_name]["image_size"]
lowercase__ : List[str] = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowerCamelCase__ , )
return preprocessor
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowercase__ : List[str] = sorted(set(lowerCamelCase__ ) )
lowercase__ : Optional[int] = len(lowerCamelCase__ )
lowercase__ : Optional[int] = {b: str(lowerCamelCase__ ) for b, i in zip(lowerCamelCase__ , range(lowerCamelCase__ ) )}
lowercase__ : Tuple = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowercase__ : Optional[Any] = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowercase__ : Any = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ : Optional[Any] = "efficientnet." + item[1]
lowercase__ : str = "classifier.weight"
lowercase__ : Optional[int] = "classifier.bias"
return key_mapping
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ : List[Any] = key_mapping[key]
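        # TF conv kernels are channels-last (H, W, in, out) while PyTorch expects (out, in, H, W);
        # depthwise and dense kernels get their own axis reordering, hence the permutes below.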
if "_conv" in key and "kernel" in key:
lowercase__ : Optional[int] = torch.from_numpy(lowerCamelCase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ : str = torch.from_numpy(lowerCamelCase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ : Tuple = torch.from_numpy(np.transpose(lowerCamelCase__ ) )
else:
lowercase__ : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCamelCase__ )
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = model_classes[model_name](
include_top=lowerCamelCase__ , weights="imagenet" , input_tensor=lowerCamelCase__ , input_shape=lowerCamelCase__ , pooling=lowerCamelCase__ , classes=1_000 , classifier_activation="softmax" , )
lowercase__ : List[str] = original_model.trainable_variables
lowercase__ : Optional[int] = original_model.non_trainable_variables
lowercase__ : str = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ : List[Any] = param.numpy()
lowercase__ : Optional[Any] = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ : List[Any] = get_efficientnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = EfficientNetForImageClassification(lowerCamelCase__ ).eval()
lowercase__ : List[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowercase__ : Optional[Any] = rename_keys(lowerCamelCase__ )
replace_params(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Initialize preprocessor and preprocess input image
lowercase__ : str = convert_image_processor(lowerCamelCase__ )
lowercase__ : int = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = hf_model(**lowerCamelCase__ )
lowercase__ : Dict = outputs.logits.detach().numpy()
# Original model inference
lowercase__ : Tuple = False
lowercase__ : Dict = CONFIG_MAP[model_name]["image_size"]
lowercase__ : Any = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ : Dict = image.img_to_array(lowerCamelCase__ )
lowercase__ : Optional[int] = np.expand_dims(lowerCamelCase__ , axis=0 )
lowercase__ : Dict = original_model.predict(lowerCamelCase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCamelCase__ ):
os.mkdir(lowerCamelCase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowerCamelCase__ )
preprocessor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ : List[Any] = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowerCamelCase__ )
hf_model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 496
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Tuple = logging.get_logger(__name__)
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Union[str, Any]=False ) -> Dict:
__snake_case : List[Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
__snake_case : Union[str, Any] = 'segformer.encoder.' + key
if key.startswith('backbone' ):
__snake_case : Union[str, Any] = key.replace('backbone' ,'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__snake_case : int = key[key.find('patch_embed' ) + len('patch_embed' )]
__snake_case : str = key.replace(f'''patch_embed{idx}''' ,f'''patch_embeddings.{int(_UpperCAmelCase )-1}''' )
if "norm" in key:
__snake_case : str = key.replace('norm' ,'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__snake_case : str = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
__snake_case : int = key.replace(f'''layer_norm{idx}''' ,f'''layer_norm.{int(_UpperCAmelCase )-1}''' )
if "layer_norm1" in key:
__snake_case : List[Any] = key.replace('layer_norm1' ,'layer_norm_1' )
if "layer_norm2" in key:
__snake_case : Tuple = key.replace('layer_norm2' ,'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
__snake_case : Tuple = key[key.find('block' ) + len('block' )]
__snake_case : Union[str, Any] = key.replace(f'''block{idx}''' ,f'''block.{int(_UpperCAmelCase )-1}''' )
if "attn.q" in key:
__snake_case : Optional[Any] = key.replace('attn.q' ,'attention.self.query' )
if "attn.proj" in key:
__snake_case : int = key.replace('attn.proj' ,'attention.output.dense' )
if "attn" in key:
__snake_case : Tuple = key.replace('attn' ,'attention.self' )
if "fc1" in key:
__snake_case : str = key.replace('fc1' ,'dense1' )
if "fc2" in key:
__snake_case : Any = key.replace('fc2' ,'dense2' )
if "linear_pred" in key:
__snake_case : Optional[Any] = key.replace('linear_pred' ,'classifier' )
if "linear_fuse" in key:
__snake_case : Tuple = key.replace('linear_fuse.conv' ,'linear_fuse' )
__snake_case : Tuple = key.replace('linear_fuse.bn' ,'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__snake_case : List[str] = key[key.find('linear_c' ) + len('linear_c' )]
__snake_case : int = key.replace(f'''linear_c{idx}''' ,f'''linear_c.{int(_UpperCAmelCase )-1}''' )
if key.startswith('head' ):
__snake_case : Dict = key.replace('head' ,'classifier' )
__snake_case : str = value
return new_state_dict
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ) -> Optional[int]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__snake_case : Tuple = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
__snake_case : str = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
__snake_case : Union[str, Any] = kv_weight[
: config.hidden_sizes[i], :
]
__snake_case : int = kv_bias[: config.hidden_sizes[i]]
__snake_case : List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
__snake_case : Any = kv_bias[
config.hidden_sizes[i] :
]
def a_ ( ) -> Dict:
__snake_case : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : int = Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw )
return image
@torch.no_grad()
def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[Any] ) -> str:
__snake_case : Any = SegformerConfig()
__snake_case : Optional[Any] = False
# set attributes based on model_name
__snake_case : Union[str, Any] = 'huggingface/label-files'
if "segformer" in model_name:
__snake_case : Optional[Any] = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
__snake_case : str = 1_50
__snake_case : Optional[Any] = 'ade20k-id2label.json'
__snake_case : str = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
__snake_case : List[str] = 19
__snake_case : Any = 'cityscapes-id2label.json'
__snake_case : Optional[int] = (1, 19, 1_28, 1_28)
else:
raise ValueError(f'''Model {model_name} not supported''' )
elif "mit" in model_name:
__snake_case : Dict = True
__snake_case : Union[str, Any] = model_name[4:6]
__snake_case : str = 10_00
__snake_case : Optional[int] = 'imagenet-1k-id2label.json'
__snake_case : Any = (1, 10_00)
else:
raise ValueError(f'''Model {model_name} not supported''' )
# set config attributes
__snake_case : Tuple = json.load(open(hf_hub_download(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ,'r' ) )
__snake_case : str = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case : str = idalabel
__snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
__snake_case : Union[str, Any] = [64, 1_28, 3_20, 5_12]
__snake_case : Dict = 2_56
elif size == "b2":
__snake_case : Optional[Any] = [64, 1_28, 3_20, 5_12]
__snake_case : Any = 7_68
__snake_case : str = [3, 4, 6, 3]
elif size == "b3":
__snake_case : List[Any] = [64, 1_28, 3_20, 5_12]
__snake_case : List[Any] = 7_68
__snake_case : Dict = [3, 4, 18, 3]
elif size == "b4":
__snake_case : str = [64, 1_28, 3_20, 5_12]
__snake_case : Optional[int] = 7_68
__snake_case : List[str] = [3, 8, 27, 3]
elif size == "b5":
__snake_case : int = [64, 1_28, 3_20, 5_12]
__snake_case : Union[str, Any] = 7_68
__snake_case : Dict = [3, 6, 40, 3]
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor (only resize + normalize)
__snake_case : int = SegformerImageProcessor(
image_scale=(5_12, 5_12) ,keep_ratio=_UpperCAmelCase ,align=_UpperCAmelCase ,do_random_crop=_UpperCAmelCase )
# prepare image
__snake_case : Tuple = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase ,return_tensors='pt' ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
__snake_case : Dict = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) )
else:
__snake_case : Tuple = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) )['state_dict']
# rename keys
__snake_case : Dict = rename_keys(_UpperCAmelCase ,encoder_only=_UpperCAmelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_UpperCAmelCase ,_UpperCAmelCase )
# create HuggingFace model and load state dict
if encoder_only:
__snake_case : int = False
__snake_case : Optional[int] = SegformerForImageClassification(_UpperCAmelCase )
else:
__snake_case : Union[str, Any] = SegformerForSemanticSegmentation(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
# forward pass
__snake_case : List[Any] = model(_UpperCAmelCase )
__snake_case : Any = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
__snake_case : Tuple = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
__snake_case : str = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -10.35_29, -10.03_04], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
__snake_case : int = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
__snake_case : Dict = torch.tensor(
[
[[-9.0_8_7_8, -10.20_81, -10.18_91], [-9.3_1_4_4, -10.79_41, -10.98_43], [-9.2_2_9_4, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
__snake_case : Union[str, Any] = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
__snake_case : Optional[Any] = torch.tensor(
[
[[-9.5_5_2_4, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.5_8_4_2, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
__snake_case : Union[str, Any] = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
__snake_case : Optional[Any] = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -10.17_17], [-9.4_4_3_8, -10.90_58, -11.40_47], [-9.7_9_3_9, -12.34_95, -12.10_79]],
[[-7.1_5_1_4, -9.5_3_3_6, -10.08_60], [-9.7_7_7_6, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
__snake_case : Optional[Any] = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
__snake_case : Optional[int] = torch.tensor(
[
[[-9.4_9_5_9, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.8_9_0_5, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
__snake_case : Any = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
__snake_case : Dict = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
__snake_case : Any = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
__snake_case : str = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
__snake_case : str = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
__snake_case : int = logits.argmax(-1 ).item()
print('Predicted class:' ,model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] ,_UpperCAmelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
A__ : List[Any] = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 707
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case__ :
def __init__( self : Optional[int] , __a : Any , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=True , __a : List[Any]=True , __a : Optional[Any]=True , __a : Optional[Any]=True , __a : Optional[int]=99 , __a : List[Any]=32 , __a : Optional[int]=2 , __a : Optional[Any]=4 , __a : Dict=37 , __a : str="gelu" , __a : str=0.1 , __a : List[Any]=0.1 , __a : Optional[Any]=512 , __a : Optional[int]=16 , __a : List[Any]=2 , __a : Any=0.0_2 , __a : Tuple=3 , __a : Optional[int]=4 , __a : List[str]=None , __a : str=1000 , ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = parent
__snake_case : List[Any] = batch_size
__snake_case : str = seq_length
__snake_case : List[str] = is_training
__snake_case : Union[str, Any] = use_input_mask
__snake_case : Tuple = use_token_type_ids
__snake_case : List[str] = use_labels
__snake_case : Optional[int] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Union[str, Any] = attention_probs_dropout_prob
__snake_case : Dict = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Any = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Tuple = num_choices
__snake_case : Tuple = scope
__snake_case : List[str] = range_bbox
def A_ ( self : int ) -> int:
'''simple docstring'''
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__snake_case : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
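        # i.e. swap coordinates where needed so that x0 <= x1 and y0 <= y1 for every box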
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case : List[Any] = bbox[i, j, 3]
__snake_case : Optional[int] = bbox[i, j, 1]
__snake_case : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case : int = bbox[i, j, 2]
__snake_case : Any = bbox[i, j, 0]
__snake_case : Any = t
__snake_case : Any = tf.convert_to_tensor(__a )
__snake_case : Optional[Any] = None
if self.use_input_mask:
__snake_case : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : str = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Any = None
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[str] = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] , __a : int , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : int ) -> Dict:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMModel(config=__a )
__snake_case : Union[str, Any] = model(__a , __a , attention_mask=__a , token_type_ids=__a )
__snake_case : str = model(__a , __a , token_type_ids=__a )
__snake_case : List[str] = model(__a , __a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : Dict , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : str , __a : List[Any] , __a : List[str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMForMaskedLM(config=__a )
__snake_case : Union[str, Any] = model(__a , __a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : List[str] , __a : Any , __a : Dict , __a : List[str] , __a : Optional[Any] , __a : Dict , __a : str , __a : Optional[int] , __a : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : str = TFLayoutLMForSequenceClassification(config=__a )
__snake_case : Any = model(__a , __a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Union[str, Any] , __a : List[Any] , __a : Optional[Any] , __a : List[Any] , __a : Optional[int] , __a : List[Any] , __a : Any , __a : Union[str, Any] , __a : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case : Optional[Any] = self.num_labels
__snake_case : Union[str, Any] = TFLayoutLMForTokenClassification(config=__a )
__snake_case : str = model(__a , __a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Tuple , __a : Optional[Any] , __a : List[str] , __a : str , __a : str , __a : Optional[int] , __a : Tuple , __a : Any , __a : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMForQuestionAnswering(config=__a )
__snake_case : Optional[Any] = model(__a , __a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : str ) -> str:
'''simple docstring'''
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = __snake_case
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
A__ = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ = False
A__ = True
A__ = 10
def A_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = TFLayoutLMModelTester(self )
__snake_case : List[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def A_ ( self : Dict ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : Tuple ) -> str:
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def A_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
@slow
def A_ ( self : List[str] ) -> int:
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[int] = TFLayoutLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def A_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
pass
def a_ ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
__snake_case : Optional[Any] = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
__snake_case : List[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__snake_case : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
__snake_case : Union[str, Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__snake_case : int = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
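# Note (added for clarity): the batch prepared above holds 2 sequences of 25 tokens each, which is
# why the integration tests below expect shapes such as (2, 25) and (2, 25, num_labels).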
@require_tf
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Dict ) -> Tuple:
'''simple docstring'''
__snake_case : str = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Dict = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : List[str] = model(input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a )
# test the sequence output on [0, :3, :3]
__snake_case : Tuple = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-3 ) )
# test the pooled output on [1, :3]
__snake_case : Any = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , __a , atol=1e-3 ) )
@slow
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
# initialize model with randomly initialized sequence classification head
__snake_case : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : int = model(
input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__snake_case : Optional[int] = outputs.loss
__snake_case : Tuple = (2,)
self.assertEqual(loss.shape , __a )
# test the shape of the logits
__snake_case : Optional[int] = outputs.logits
__snake_case : str = (2, 2)
self.assertEqual(logits.shape , __a )
@slow
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
# initialize model with randomly initialized token classification head
__snake_case : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : List[str] = model(
input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a , labels=__a )
# test the shape of the logits
__snake_case : Any = outputs.logits
__snake_case : Optional[int] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , __a )
@slow
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
# initialize model with randomly initialized token classification head
__snake_case : List[str] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = prepare_layoutlm_batch_inputs()
# forward pass
__snake_case : Optional[Any] = model(input_ids=__a , bbox=__a , attention_mask=__a , token_type_ids=__a )
# test the shape of the logits
__snake_case : Optional[Any] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , __a )
self.assertEqual(outputs.end_logits.shape , __a )
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    # Accepts 07x mobile numbers with the 0, 94, +94 or 0094 prefix and carrier digits 0-8 (except 3 and 9).
    pattern = re.compile(
        R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
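    # A few extra illustrative checks (added examples, not part of the original file):
    print(is_sri_lankan_phone_number('+94773283048'))  # expected: True
    print(is_sri_lankan_phone_number('0718382399'))  # expected: True
    print(is_sri_lankan_phone_number('0094112343221'))  # expected: False (not a 07x mobile number)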
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of the softmax distribution defined by the logits ``x`` (reduced over dim 1)."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
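# Hedged illustration (added, not part of the original file): for uniform logits the value is
# maximal, e.g. entropy(torch.zeros(1, 4)) ~= log(4) ~= 1.386, while a confident distribution
# such as entropy(torch.tensor([[10.0, 0.0, 0.0, 0.0]])) is close to 0.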
class DeeBertEncoder(nn.Module):
"""simple docstring"""
def __init__( self :List[str] , __lowercase :int ):
super().__init__()
__lowerCamelCase : str =config.output_attentions
__lowerCamelCase : List[Any] =config.output_hidden_states
__lowerCamelCase : Dict =nn.ModuleList([BertLayer(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : str =nn.ModuleList([BertHighway(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : Optional[Any] =[-1 for _ in range(config.num_hidden_layers )]
def __lowercase ( self :Union[str, Any] , __lowercase :Union[str, Any] ):
if (type(__lowercase ) is float) or (type(__lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__lowerCamelCase : Tuple =x
else:
__lowerCamelCase : Any =x
def __lowercase ( self :Union[str, Any] , __lowercase :Tuple ):
__lowerCamelCase : Union[str, Any] =pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __lowercase ( self :Tuple , __lowercase :Optional[int] , __lowercase :Dict=None , __lowercase :Union[str, Any]=None , __lowercase :List[str]=None , __lowercase :str=None , ):
__lowerCamelCase : Any =()
__lowerCamelCase : List[str] =()
__lowerCamelCase : Optional[int] =()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowerCamelCase : int =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =layer_module(
__lowercase , __lowercase , head_mask[i] , __lowercase , __lowercase )
__lowerCamelCase : Optional[int] =layer_outputs[0]
if self.output_attentions:
__lowerCamelCase : Optional[Any] =all_attentions + (layer_outputs[1],)
__lowerCamelCase : Any =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Dict =current_outputs + (all_attentions,)
__lowerCamelCase : str =self.highway[i](__lowercase )
# logits, pooled_output
if not self.training:
__lowerCamelCase : Tuple =highway_exit[0]
__lowerCamelCase : Tuple =entropy(__lowercase )
__lowerCamelCase : Tuple =highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowerCamelCase : Optional[int] =all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowerCamelCase : Dict =(highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowercase , i + 1 )
else:
__lowerCamelCase : Union[str, Any] =all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Tuple =outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Optional[int] =outputs + (all_attentions,)
__lowerCamelCase : int =outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """,
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :str ):
super().__init__(__lowercase )
__lowerCamelCase : Union[str, Any] =config
__lowerCamelCase : List[str] =BertEmbeddings(__lowercase )
__lowerCamelCase : Dict =DeeBertEncoder(__lowercase )
__lowerCamelCase : List[Any] =BertPooler(__lowercase )
self.init_weights()
def __lowercase ( self :Tuple ):
self.encoder.init_highway_pooler(self.pooler )
def __lowercase ( self :Dict ):
return self.embeddings.word_embeddings
def __lowercase ( self :List[str] , __lowercase :int ):
__lowerCamelCase : Union[str, Any] =value
def __lowercase ( self :List[Any] , __lowercase :Dict ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowercase )
@add_start_docstrings_to_model_forward(__lowercase )
def __lowercase ( self :Optional[Any] , __lowercase :List[str]=None , __lowercase :List[Any]=None , __lowercase :Any=None , __lowercase :Tuple=None , __lowercase :Union[str, Any]=None , __lowercase :Optional[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Tuple=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowerCamelCase : List[str] =input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase : str =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowerCamelCase : Optional[int] =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase : str =torch.ones(__lowercase , device=__lowercase )
if encoder_attention_mask is None:
__lowerCamelCase : Tuple =torch.ones(__lowercase , device=__lowercase )
if token_type_ids is None:
__lowerCamelCase : List[Any] =torch.zeros(__lowercase , dtype=torch.long , device=__lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase : torch.Tensor =self.get_extended_attention_mask(__lowercase , __lowercase , __lowercase )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCamelCase : List[str] =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCamelCase : Any =encoder_attention_mask[:, None, None, :]
__lowerCamelCase : Optional[Any] =encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowerCamelCase : List[str] =(1.0 - encoder_extended_attention_mask) * -10000.0
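        # Note (added for clarity, not in the original file): masked positions receive a large
        # negative bias (-10000) so that, after the attention softmax, their weights are ~0.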
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase : Union[str, Any] =self.get_head_mask(__lowercase , self.config.num_hidden_layers )
__lowerCamelCase : str =self.embeddings(
input_ids=__lowercase , position_ids=__lowercase , token_type_ids=__lowercase , inputs_embeds=__lowercase )
__lowerCamelCase : Dict =self.encoder(
__lowercase , attention_mask=__lowercase , head_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__lowerCamelCase : int =encoder_outputs[0]
__lowerCamelCase : Tuple =self.pooler(__lowercase )
__lowerCamelCase : int =(
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
"""simple docstring"""
def __init__( self :List[Any] , __lowercase :Optional[Any] , __lowercase :Dict ):
__lowerCamelCase : List[Any] =message
__lowerCamelCase : int =exit_layer # start from 1!
class BertHighway(nn.Module):
"""simple docstring"""
def __init__( self :Any , __lowercase :str ):
super().__init__()
__lowerCamelCase : str =BertPooler(__lowercase )
__lowerCamelCase : Union[str, Any] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : List[str] =nn.Linear(config.hidden_size , config.num_labels )
def __lowercase ( self :Union[str, Any] , __lowercase :List[str] ):
# Pooler
__lowerCamelCase : Optional[Any] =encoder_outputs[0]
__lowerCamelCase : Any =self.pooler(__lowercase )
# "return" pooler_output
# BertModel
__lowerCamelCase : List[str] =(pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__lowerCamelCase : List[Any] =bmodel_output[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : int =self.classifier(__lowercase )
return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :Dict ):
super().__init__(__lowercase )
__lowerCamelCase : Any =config.num_labels
__lowerCamelCase : int =config.num_hidden_layers
__lowerCamelCase : Tuple =DeeBertModel(__lowercase )
__lowerCamelCase : Optional[int] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : Optional[int] =nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowercase )
def __lowercase ( self :List[str] , __lowercase :List[str]=None , __lowercase :str=None , __lowercase :Optional[Any]=None , __lowercase :List[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Dict=None , __lowercase :int=None , __lowercase :int=-1 , __lowercase :List[str]=False , ):
__lowerCamelCase : Union[str, Any] =self.num_layers
try:
__lowerCamelCase : Union[str, Any] =self.bert(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCamelCase : List[Any] =outputs[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : Tuple =self.classifier(__lowercase )
__lowerCamelCase : int =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCamelCase : Union[str, Any] =e.message
__lowerCamelCase : Optional[Any] =e.exit_layer
__lowerCamelCase : Any =outputs[0]
if not self.training:
__lowerCamelCase : List[Any] =entropy(__lowercase )
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Union[str, Any] =MSELoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : Dict =CrossEntropyLoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase : str =[]
for highway_exit in outputs[-1]:
__lowerCamelCase : List[str] =highway_exit[0]
if not self.training:
highway_logits_all.append(__lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Optional[int] =MSELoss()
__lowerCamelCase : Optional[Any] =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : int =CrossEntropyLoss()
__lowerCamelCase : int =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowercase )
if train_highway:
__lowerCamelCase : Dict =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase : List[str] =(loss,) + outputs
if not self.training:
__lowerCamelCase : List[Any] =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase : Dict =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowercase ='xvjiarui/stable-diffusion-2-inpainting'
__lowercase , __lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(_lowerCAmelCase , safety_checker=_lowerCAmelCase)
__lowercase ='Face of a yellow cat, high resolution, sitting on a park bench'
__lowercase =jax.random.PRNGKey(0)
__lowercase =5_0
__lowercase =jax.device_count()
__lowercase =num_samples * [prompt]
__lowercase =num_samples * [init_image]
__lowercase =num_samples * [mask_image]
__lowercase , __lowercase , __lowercase =pipeline.prepare_inputs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# shard inputs and rng
__lowercase =replicate(_lowerCAmelCase)
__lowercase =jax.random.split(_lowerCAmelCase , jax.device_count())
__lowercase =shard(_lowerCAmelCase)
__lowercase =shard(_lowerCAmelCase)
__lowercase =shard(_lowerCAmelCase)
__lowercase =pipeline(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase)
__lowercase =output.images.reshape(_lowerCAmelCase , 5_1_2 , 5_1_2 , 3)
__lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__lowercase =jnp.asarray(jax.device_get(image_slice.flatten()))
__lowercase =jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
'''simple docstring'''
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
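# Small hand-checkable sketch (added example, not from the original file): by Fermat's little
# theorem, b ** (p - 2) % p is the modular inverse of b modulo a prime p, so the product below
# should be 1 (here p = 13, b = 5 and 5 * 5**11 % 13 == 1).
print((5 * binary_exponentiation(5, 13 - 2, 13)) % 13 == 1)  # expected: True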
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Any = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = 'switch_transformers'
lowerCAmelCase = ['past_key_values']
lowerCAmelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int]=3_2_1_2_8 , SCREAMING_SNAKE_CASE : Tuple=7_6_8 , SCREAMING_SNAKE_CASE : Union[str, Any]=6_4 , SCREAMING_SNAKE_CASE : Optional[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE : Union[str, Any]=6_4 , SCREAMING_SNAKE_CASE : int=1_2 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : int=1_2 , SCREAMING_SNAKE_CASE : str=8 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Any=0.0_1 , SCREAMING_SNAKE_CASE : Union[str, Any]="float32" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE : str=1_2_8 , SCREAMING_SNAKE_CASE : Any=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=1E-6 , SCREAMING_SNAKE_CASE : Optional[int]=0.0_0_1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.0_0_1 , SCREAMING_SNAKE_CASE : List[str]=1.0 , SCREAMING_SNAKE_CASE : str="relu" , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[Any]=False , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Dict=0 , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : List[Any] , ) -> Dict:
"""simple docstring"""
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
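        # Worked example (added for clarity; values are the defaults above): with num_layers = 12
        # and num_sparse_encoder_layers = 3, the encoder sparse step is 12 // 3 = 4, i.e. every
        # 4th encoder layer is a sparse (mixture-of-experts) layer.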
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split("-" )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == "gated"
if len(SCREAMING_SNAKE_CASE ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE ) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = "gelu_new"
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(circle_sort(unsorted))
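    # Quick sanity check (added example, not in the original file): the result should
    # agree with Python's built-in sorted().
    print(circle_sort([5, 3, -1, 8, 0]) == sorted([5, 3, -1, 8, 0]))  # expected: True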
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ):
'''simple docstring'''
__a : List[str] = parent
__a : Tuple = batch_size
__a : str = image_size
__a : int = patch_size
__a : Dict = num_channels
__a : int = is_training
__a : Dict = use_labels
__a : Union[str, Any] = hidden_size
__a : Dict = num_hidden_layers
__a : Dict = backbone_out_indices
__a : Optional[int] = num_attention_heads
__a : List[str] = intermediate_size
__a : Optional[Any] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Tuple = attention_probs_dropout_prob
__a : Any = initializer_range
__a : Any = num_labels
__a : Optional[Any] = backbone_featmap_shape
__a : List[Any] = scope
__a : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__a : Union[str, Any] = (image_size // patch_size) ** 2
__a : List[str] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Union[str, Any] = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[str] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 192, 384, 768],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Optional[Any] = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : List[str] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__a : str = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowercase__ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DPTModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Any = model_class(__UpperCamelCase )
__a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[Any] = True
if model_class in get_values(__UpperCamelCase ):
continue
__a : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : List[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = False
__a : Dict = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__a : Any = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__a : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__a : Any = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__a : int = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = """add"""
with self.assertRaises(__UpperCamelCase ):
__a : int = DPTForDepthEstimation(__UpperCamelCase )
def _snake_case ( ) -> Any:
__a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
__a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
__a : Union[str, Any] = prepare_img()
__a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__UpperCamelCase )
__a : int = outputs.predicted_depth
# verify the predicted depth
__a : Any = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__a : int = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a ( __snake_case , unittest.TestCase ):
a_ : int = ShapEPipeline
a_ : Tuple = ['prompt']
a_ : str = ['prompt']
a_ : Dict = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
a_ : str = False
@property
def _UpperCamelCase ( self : Dict ):
return 32
@property
def _UpperCamelCase ( self : Optional[Any] ):
return 32
@property
def _UpperCamelCase ( self : List[str] ):
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Tuple ):
return 8
@property
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _UpperCamelCase ( self : List[str] ):
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Any ):
torch.manual_seed(0 )
lowerCamelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
lowerCamelCase__ = PriorTransformer(**SCREAMING_SNAKE_CASE__ )
return model
@property
def _UpperCamelCase ( self : str ):
torch.manual_seed(0 )
lowerCamelCase__ = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase__ = ShapERenderer(**SCREAMING_SNAKE_CASE__ )
return model
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = self.dummy_prior
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = self.dummy_tokenizer
lowerCamelCase__ = self.dummy_renderer
lowerCamelCase__ = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , )
lowerCamelCase__ = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
lowerCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self : int ):
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = output.images[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase__ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = torch_device == 'cpu'
lowerCamelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , )
def _UpperCamelCase ( self : Tuple ):
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 1
lowerCamelCase__ = 2
lowerCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase__ = batch_size * [inputs[key]]
lowerCamelCase__ = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
lowerCamelCase__ = ShapEPipeline.from_pretrained('openai/shap-e' )
lowerCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
lowerCamelCase__ = pipe(
'a shark' , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'vivit'
    def __init__( self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.0_2, layer_norm_eps=1e-06, qkv_bias=True, **kwargs ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
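    # Worked illustrative example (added, with assumed values): two 1 C charges 1 m apart
    # experience a force equal to Coulomb's constant in newtons.
    print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))  # {'force': 8988000000.0}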
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
_a : Optional[Any] = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __lowercase ( self ) -> Tuple:
_a : Tuple = '''sshleifer/tiny-gpt2'''
_a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : int = PyTorchBenchmark(_a )
_a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase ( self ) -> str:
_a : str = '''sgugger/tiny-distilbert-classification'''
_a : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
_a : Any = PyTorchBenchmark(_a )
_a : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase ( self ) -> int:
_a : Optional[Any] = '''sshleifer/tiny-gpt2'''
_a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : int = PyTorchBenchmark(_a )
_a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __lowercase ( self ) -> int:
_a : Optional[int] = '''sshleifer/tiny-gpt2'''
_a : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : int = PyTorchBenchmark(_a )
_a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = '''sshleifer/tiny-gpt2'''
_a : Dict = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
_a : List[Any] = None
_a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : List[Any] = PyTorchBenchmark(_a , configs=[config] )
_a : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase ( self ) -> Dict:
_a : Tuple = '''sshleifer/tiny-gpt2'''
_a : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : Dict = PyTorchBenchmark(_a )
_a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __lowercase ( self ) -> Optional[Any]:
_a : Any = '''sshleifer/tiny-gpt2'''
_a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
_a : Tuple = PyTorchBenchmark(_a )
_a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowercase ( self ) -> Any:
_a : int = '''sshleifer/tiny-gpt2'''
_a : List[str] = AutoConfig.from_pretrained(_a )
_a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : Tuple = PyTorchBenchmark(_a , configs=[config] )
_a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = '''sshleifer/tinier_bart'''
_a : Optional[int] = AutoConfig.from_pretrained(_a )
_a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : List[str] = PyTorchBenchmark(_a , configs=[config] )
_a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowercase ( self ) -> int:
_a : Optional[int] = '''sshleifer/tiny-gpt2'''
_a : int = AutoConfig.from_pretrained(_a )
_a : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : Tuple = PyTorchBenchmark(_a , configs=[config] )
_a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowercase ( self ) -> List[Any]:
_a : Dict = '''sshleifer/tinier_bart'''
_a : Dict = AutoConfig.from_pretrained(_a )
_a : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
_a : int = PyTorchBenchmark(_a , configs=[config] )
_a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowercase ( self ) -> List[str]:
_a : List[str] = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
_a : List[str] = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[int] = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_a : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
_a : List[str] = PyTorchBenchmark(_a )
_a : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
| 14
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
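# Converts a PyTorch BertModel state dict into a TensorFlow 1.x checkpoint: keys are remapped to the original
# TF naming scheme (layer_, gamma/beta, kernel), dense and attention q/k/v weights are transposed, and every
# tensor is created inside a tf.Session and saved with tf.train.Saver.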
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
snake_case_ : Union[str, Any] = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
snake_case_ : str = model.state_dict()
def to_tf_var_name(__magic_name__ ):
for patt, repl in iter(__magic_name__ ):
snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ )
return F'''bert/{name}'''
def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ):
snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__magic_name__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ )
snake_case_ : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
snake_case_ : List[Any] = torch_tensor.T
snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ )
tf.keras.backend.set_value(__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = session.run(__magic_name__ )
print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' )
snake_case_ : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]:
"""simple docstring"""
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" )
snake_case_ : Optional[int] = parser.parse_args(__magic_name__ )
snake_case_ : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
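# Example invocation (the script file name and paths are placeholders, not part of this file):
#   python <this_script>.py --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin --tf_cache_dir /path/to/tf_checkpoints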
if __name__ == "__main__":
main()
| 653
| 0
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
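# Shared feature-extraction tests: the class below checks the common attributes (feature_size, sampling_rate,
# padding_value), BatchFeature conversion to numpy/torch/tf tensors, padding ("longest", "max_length",
# pad_to_multiple_of), truncation, attention-mask generation, and numpy vs torch/tf equivalence of the outputs.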
class __lowerCamelCase ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = None
snake_case__ = None
@property
def a ( self : int ) -> Optional[int]:
return self.feat_extract_tester.prepare_feat_extract_dict()
def a ( self : Any ) -> Optional[int]:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , "feature_size" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "sampling_rate" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , "padding_value" ) )
def a ( self : str ) -> Optional[Any]:
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , processed_features[input_name] ) ) )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
lowerCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def a ( self : Dict ) -> Dict:
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
lowerCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def a ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
lowerCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Union[str, Any]:
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : Dict ):
lowerCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCAmelCase_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if not np.allclose(np.asarray(lowerCAmelCase_ ) , np.asarray(lowerCAmelCase_ ) , atol=1e-3 ):
return False
return True
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase_ )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = self.feat_extract_tester.seq_length_diff
lowerCAmelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowerCAmelCase__ = self.feat_extract_tester.min_seq_length
lowerCAmelCase__ = self.feat_extract_tester.batch_size
lowerCAmelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[-1] ) )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np" )
lowerCAmelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding="max_length" )[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , return_tensors="np" )
lowerCAmelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , pad_to_multiple_of=10 )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , pad_to_multiple_of=10 )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCAmelCase_ )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCAmelCase_ , return_tensors="np" , )
lowerCAmelCase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowerCAmelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCAmelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> Tuple:
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
lowerCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCAmelCase_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ):
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if not np.allclose(np.asarray(lowerCAmelCase_ ) , np.asarray(lowerCAmelCase_ ) , atol=1e-3 ):
return False
return True
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase_ )
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=lowerCAmelCase_ )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) )
lowerCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
# truncate to smallest with np
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=lowerCAmelCase_ , )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
lowerCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
# truncate to middle
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase_ , return_tensors="np" , )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase_ )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
lowerCAmelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , truncation=lowerCAmelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding="longest" , truncation=lowerCAmelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding="longest" , truncation=lowerCAmelCase_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding="max_length" , truncation=lowerCAmelCase_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCAmelCase__ = 12
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )
lowerCAmelCase__ = input_a[input_name]
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase_ , )
lowerCAmelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCAmelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCAmelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
def a ( self : Any ) -> List[Any]:
self._check_padding(numpify=lowerCAmelCase_ )
def a ( self : int ) -> Union[str, Any]:
self._check_padding(numpify=lowerCAmelCase_ )
def a ( self : Union[str, Any] ) -> Union[str, Any]:
self._check_truncation(numpify=lowerCAmelCase_ )
def a ( self : Dict ) -> Union[str, Any]:
self._check_truncation(numpify=lowerCAmelCase_ )
@require_torch
def a ( self : str ) -> Tuple:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np" )[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def a ( self : List[Any] ) -> Any:
lowerCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np" )[input_name]
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def a ( self : Dict ) -> Optional[Any]:
lowerCAmelCase__ = self.feat_extract_dict
lowerCAmelCase__ = True
lowerCAmelCase__ = self.feature_extraction_class(**lowerCAmelCase_ )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ = [len(lowerCAmelCase_ ) for x in speech_inputs]
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = feat_extract.pad(lowerCAmelCase_ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase_ )
def a ( self : str ) -> Optional[Any]:
lowerCAmelCase__ = self.feat_extract_dict
lowerCAmelCase__ = True
lowerCAmelCase__ = self.feature_extraction_class(**lowerCAmelCase_ )
lowerCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowerCAmelCase__ = [len(lowerCAmelCase_ ) for x in speech_inputs]
lowerCAmelCase__ = feat_extract.model_input_names[0]
lowerCAmelCase__ = BatchFeature({input_name: speech_inputs} )
lowerCAmelCase__ = min(lowerCAmelCase_ )
lowerCAmelCase__ = feat_extract.pad(
lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 712
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
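# Processor wrapping a ViT image processor and a CLIP tokenizer: __call__ accepts text, a visual prompt image
# and/or target images, merging the tokenizer output with pixel_values / conditional_pixel_values from the
# image processor; batch_decode and decode are forwarded to the tokenizer.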
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = ["image_processor", "tokenizer"]
snake_case__ = "ViTImageProcessor"
snake_case__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : str=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __call__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowerCAmelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if visual_prompt is not None:
lowerCAmelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if images is not None:
lowerCAmelCase__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if visual_prompt is not None and images is not None:
lowerCAmelCase__ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowerCAmelCase__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowerCAmelCase__ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ )
def a ( self : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : str , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def a ( self : Dict ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def a ( self : Union[str, Any] ) -> int:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
| 125
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
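# Tester for BridgeTowerImageProcessor: prepare_image_processor_dict builds the processor kwargs, and
# get_expected_values reproduces the expected output size (shortest-edge resize with a 1333/800 max-size cap,
# then rounding both sides down to a multiple of size_divisor).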
class a_ ( unittest.TestCase ):
def __init__( self : Dict , a_ : List[Any] , a_ : bool = True , a_ : Dict[str, int] = None , a_ : int = 3_2 , a_ : bool = True , a_ : Union[int, float] = 1 / 2_5_5 , a_ : bool = True , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , a_ : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , a_ : bool = True , a_ : Optional[Any]=7 , a_ : Any=3_0 , a_ : Optional[int]=4_0_0 , a_ : Optional[Any]=3 , ) -> Union[str, Any]:
snake_case: int =parent
snake_case: Union[str, Any] =do_resize
snake_case: Union[str, Any] =size if size is not None else {'shortest_edge': 2_8_8}
snake_case: Union[str, Any] =size_divisor
snake_case: Dict =do_rescale
snake_case: List[Any] =rescale_factor
snake_case: Tuple =do_normalize
snake_case: List[Any] =do_center_crop
snake_case: Union[str, Any] =image_mean
snake_case: Optional[Any] =image_std
snake_case: Union[str, Any] =do_pad
snake_case: int =batch_size
snake_case: Optional[Any] =num_channels
snake_case: str =min_resolution
snake_case: List[str] =max_resolution
def UpperCamelCase ( self : List[Any] ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCamelCase ( self : Optional[int] , a_ : str , a_ : Optional[Any]=False ) -> str:
if not batched:
snake_case: str =self.size['shortest_edge']
snake_case: Union[str, Any] =image_inputs[0]
if isinstance(a_ , Image.Image ):
snake_case , snake_case: Tuple =image.size
else:
snake_case , snake_case: Dict =image.shape[1], image.shape[2]
snake_case: Tuple =size / min(a_ , a_ )
if h < w:
snake_case , snake_case: List[Any] =size, scale * w
else:
snake_case , snake_case: List[str] =scale * h, size
snake_case: List[Any] =int((1_3_3_3 / 8_0_0) * size )
if max(a_ , a_ ) > max_size:
snake_case: List[Any] =max_size / max(a_ , a_ )
snake_case: List[str] =newh * scale
snake_case: Optional[int] =neww * scale
snake_case , snake_case: Dict =int(newh + 0.5 ), int(neww + 0.5 )
snake_case , snake_case: List[str] =(
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
snake_case: Any =[]
for image in image_inputs:
snake_case , snake_case: Union[str, Any] =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case: int =max(a_ , key=lambda a_ : item[0] )[0]
snake_case: Any =max(a_ , key=lambda a_ : item[1] )[1]
return expected_height, expected_width
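# The tests below run BridgeTowerImageProcessor on PIL, numpy and torch inputs and compare the encoded
# pixel_values shapes (unbatched and batched) against the tester's expected height/width.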
@require_torch
@require_vision
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : str ) -> Dict:
snake_case: Tuple =BridgeTowerImageProcessingTester(self )
@property
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : List[str] ) -> Dict:
snake_case: Optional[Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'image_mean' ) )
self.assertTrue(hasattr(a_ , 'image_std' ) )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_resize' ) )
self.assertTrue(hasattr(a_ , 'size' ) )
self.assertTrue(hasattr(a_ , 'size_divisor' ) )
def UpperCamelCase ( self : Any ) -> List[Any]:
pass
def UpperCamelCase ( self : Optional[int] ) -> Any:
# Initialize image processor
snake_case: Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case: Union[str, Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
snake_case: List[str] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case , snake_case: Optional[int] =self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case: str =image_processing(a_ , return_tensors='pt' ).pixel_values
snake_case , snake_case: Optional[Any] =self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Optional[int] ) -> Dict:
# Initialize image processor
snake_case: Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case: str =prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
snake_case: Optional[Any] =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case , snake_case: Optional[int] =self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case: int =image_processing(a_ , return_tensors='pt' ).pixel_values
snake_case , snake_case: str =self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Any ) -> Any:
# Initialize image processor
snake_case: int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case: Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
snake_case: int =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case , snake_case: List[str] =self.image_processor_tester.get_expected_values(a_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case: Union[str, Any] =image_processing(a_ , return_tensors='pt' ).pixel_values
snake_case , snake_case: Optional[int] =self.image_processor_tester.get_expected_values(a_ , batched=a_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 350
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a_ :
pass
| 350
| 1
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : int = logging.get_logger(__name__)
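# Byte-level tokenizer in the ByT5 style: the vocabulary is the 256 UTF-8 byte values plus pad/eos/unk and a
# configurable number of <extra_id_*> sentinel tokens; byte token ids are offset by the number of special
# tokens, and decoding reassembles the byte string and decodes it as UTF-8 with errors ignored.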
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : int = ["""input_ids""", """attention_mask"""]
def __init__( self , UpperCAmelCase__="</s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__=125 , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A__ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A__ = len(set(filter(lambda UpperCAmelCase__ : bool("extra_id" in str(UpperCAmelCase__ ) ) , UpperCAmelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
A__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token
A__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token
A__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token
super().__init__(
eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , extra_ids=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = extra_ids
A__ = 2**8 # utf is 8 bits
# define special tokens dict
A__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
A__ = len(self.special_tokens_encoder )
A__ = len(UpperCAmelCase__ )
for i, token in enumerate(UpperCAmelCase__ ):
A__ = self.vocab_size + i - n
A__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __A ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCAmelCase__ )) + [1]
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def __A ( self , UpperCAmelCase__ ):
if len(UpperCAmelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = self._add_eos_if_not_present(UpperCAmelCase__ )
if token_ids_a is None:
return token_ids_a
else:
A__ = self._add_eos_if_not_present(UpperCAmelCase__ )
return token_ids_a + token_ids_a
def __A ( self , UpperCAmelCase__ ):
A__ = [chr(UpperCAmelCase__ ) for i in text.encode("utf-8" )]
return tokens
def __A ( self , UpperCAmelCase__ ):
if token in self.special_tokens_encoder:
A__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
A__ = self.added_tokens_encoder[token]
elif len(UpperCAmelCase__ ) != 1:
A__ = self.unk_token_id
else:
A__ = ord(UpperCAmelCase__ ) + self._num_special_tokens
return token_id
def __A ( self , UpperCAmelCase__ ):
if index in self.special_tokens_decoder:
A__ = self.special_tokens_decoder[index]
else:
A__ = chr(index - self._num_special_tokens )
return token
def __A ( self , UpperCAmelCase__ ):
A__ = b""
for token in tokens:
if token in self.special_tokens_decoder:
A__ = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
A__ = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
A__ = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
A__ = token.encode("utf-8" )
else:
A__ = bytes([ord(UpperCAmelCase__ )] )
bstring += tok_string
A__ = bstring.decode("utf-8" , errors="ignore" )
return string
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
return ()
| 232
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCAmelCase_ : Union[str, Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
UpperCAmelCase_ : Any = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
UpperCAmelCase_ : Union[str, Any] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
UpperCAmelCase_ : Union[str, Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
UpperCAmelCase_ : List[str] = "allenai"
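# The helper below rewrites a fairseq BPE vocabulary dict: the "@@" continuation marker is stripped and "</w>"
# is appended to complete words, then the <s>/<pad>/</s>/<unk> special tokens are restored to their plain keys.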
def UpperCamelCase ( _A : Tuple )-> Dict:
"""simple docstring"""
A__ = dict((re.sub(R"@@$" , "" , _A ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , _A ), v) for k, v in d.items() )
A__ = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
A__ = d[k] # restore
return da
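# The conversion routine below loads a fairseq FSMT checkpoint via hub_utils.from_pretrained and writes out
# vocab-src.json / vocab-tgt.json, the BPE merges file, config.json and the tokenizer config, then remaps the
# fairseq weights onto FSMTForConditionalGeneration and saves them to the dump folder.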
def UpperCamelCase ( _A : str , _A : Dict )-> Dict:
"""simple docstring"""
assert os.path.exists(_A )
os.makedirs(_A , exist_ok=_A )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
A__ = basename(_A )
A__ = dirname(_A )
A__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
A__ = cls.hub_models()
A__ = {"bpe": "fastbpe", "tokenizer": "moses"}
A__ = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
A__ = hub_utils.from_pretrained(
_A , _A , _A , archive_map=_A , **_A )
A__ = vars(chkpt["args"]["model"] )
A__ = args["source_lang"]
A__ = args["target_lang"]
A__ = dirname(_A )
A__ = basename(_A )
# dicts
A__ = os.path.join(_A , f"""dict.{src_lang}.txt""" )
A__ = os.path.join(_A , f"""dict.{tgt_lang}.txt""" )
A__ = Dictionary.load(_A )
A__ = rewrite_dict_keys(src_dict.indices )
A__ = len(_A )
A__ = os.path.join(_A , "vocab-src.json" )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
A__ = True
for k in src_vocab.keys():
if not k.islower():
A__ = False
break
A__ = Dictionary.load(_A )
A__ = rewrite_dict_keys(tgt_dict.indices )
A__ = len(_A )
A__ = os.path.join(_A , "vocab-tgt.json" )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# merges_file (bpecodes)
A__ = os.path.join(_A , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
A__ = os.path.join(_A , _A )
if os.path.exists(_A ):
break
with open(_A , encoding="utf-8" ) as fin:
A__ = fin.read()
A__ = re.sub(R" \d+$" , "" , _A , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_A , "w" , encoding="utf-8" ) as fout:
fout.write(_A )
# model config
A__ = os.path.join(_A , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args["bpe"]}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args["tokenizer"]}"""
A__ = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
A__ = 5
A__ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
A__ = best_score_hparams[model_dir]["length_penalty"]
else:
A__ = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# tokenizer config
A__ = os.path.join(_A , _A )
A__ = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , ensure_ascii=_A , indent=_A ) )
# model
A__ = chkpt["models"][0]
A__ = model.state_dict()
# rename keys to start with 'model.'
A__ = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
A__ = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(_A , _A )
A__ = FSMTConfig.from_pretrained(_A )
A__ = FSMTForConditionalGeneration(_A )
# check that it loads ok
model_new.load_state_dict(_A , strict=_A )
# save
A__ = os.path.join(_A , _A )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_A , _A )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
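# Example invocation (the script file name and paths are placeholders, not part of this file):
#   python <this_script>.py --fsmt_checkpoint_path /path/to/checkpoint_dir/model.pt \
#       --pytorch_dump_folder_path /path/to/output_dir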
| 232
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCamelCase : List[str] = logging.getLogger(__name__)
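# The dataclasses below hold the data arguments (dataset name/config, max_seq_length, padding behavior,
# sample limits, train/validation/test files) and the model arguments (model/config/tokenizer names, cache
# dir, fast-tokenizer flag, revision, auth token) for this tab_fact fine-tuning script.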
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase__ = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
UpperCamelCase__ = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase__ = field(default=__magic_name__ , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
a = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
a = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCAmelCase :
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase__ = field(
default=__magic_name__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
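# The entry point below parses the arguments, sets up logging and checkpoint detection, loads the tab_fact
# dataset (or local CSV/JSON files), converts each table_text string ('#'-separated columns, newline-separated
# rows) into a pandas DataFrame for TapexTokenizer, and fine-tunes BartForSequenceClassification with the
# label mapping {"Refused": 0, "Entailed": 1}.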
def __A ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
a = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
datasets.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
a = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
a = data_args.train_file.split(""".""" )[-1]
a = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
a = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
a = load_dataset("""csv""" , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
a = load_dataset("""json""" , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
a = raw_datasets["""train"""].features["""label"""].names
a = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
a = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowerCamelCase , )
a = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
a = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
a = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
a = {"""Refused""": 0, """Entailed""": 1}
a = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
a = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__lowerCamelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__lowerCamelCase ):
a = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
a = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
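# For reference (hypothetical value): a `table_text` string looks like "col1#col2\ncell1#cell2\ncell3#cell4";
# the first "#"-separated row becomes the DataFrame columns and the remaining rows become its records.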
a = examples["""statement"""]
a = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
a = tokenizer(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase )
a = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
a = raw_datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
a = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
a = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
a = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
a = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
a = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
a = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__lowerCamelCase ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCamelCase ):
a = p.predictions[0] if isinstance(p.predictions , __lowerCamelCase ) else p.predictions
a = np.argmax(__lowerCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
a = default_data_collator
elif training_args.fpaa:
a = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 )
else:
a = None
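# In other words: with `pad_to_max_length` every example was already padded to `max_seq_length` during
# preprocessing, so the default collator suffices; otherwise `DataCollatorWithPadding` pads each batch on the
# fly, rounding lengths up to a multiple of 8 under fp16 so that tensor cores can be used.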
# Initialize our Trainer
a = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
a = None
if training_args.resume_from_checkpoint is not None:
a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a = last_checkpoint
a = trainer.train(resume_from_checkpoint=__lowerCamelCase )
a = train_result.metrics
a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase )
)
a = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __lowerCamelCase )
trainer.save_metrics("""train""" , __lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
a = trainer.evaluate(eval_dataset=__lowerCamelCase )
a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase )
a = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics("""eval""" , __lowerCamelCase )
trainer.save_metrics("""eval""" , __lowerCamelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
a = predict_dataset.remove_columns("""label""" )
a = trainer.predict(__lowerCamelCase , metric_key_prefix="""predict""" ).predictions
a = np.argmax(__lowerCamelCase , axis=1 )
a = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__lowerCamelCase ):
a = label_list[item]
writer.write(f'{index}\t{item}\n' )
a = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
def __A ( __lowerCamelCase ) -> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 468
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Tuple:
a = None
if token is not None:
a = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
a = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
a = requests.get(__lowerCamelCase , headers=__lowerCamelCase ).json()
a = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
a = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowerCamelCase ):
a = requests.get(url + f'&page={i + 2}' , headers=__lowerCamelCase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
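# Note: the GitHub REST API returns at most 100 items per page, which is why the loop above issues
# `ceil((total_count - 100) / 100)` extra requests with an explicit `&page=` query parameter.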
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Dict:
a = None
if token is not None:
a = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
a = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'
a = requests.get(__lowerCamelCase , headers=__lowerCamelCase ).json()
a = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
a = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(__lowerCamelCase ):
a = requests.get(url + f'&page={i + 2}' , headers=__lowerCamelCase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
a = None
if token is not None:
a = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
a = requests.get(__lowerCamelCase , headers=__lowerCamelCase , allow_redirects=__lowerCamelCase )
a = result.headers["""Location"""]
a = requests.get(__lowerCamelCase , allow_redirects=__lowerCamelCase )
a = os.path.join(__lowerCamelCase , f'{artifact_name}.zip' )
with open(__lowerCamelCase , """wb""" ) as fp:
fp.write(response.content )
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Tuple:
a = []
a = []
a = None
with zipfile.ZipFile(__lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__lowerCamelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowerCamelCase ) as f:
for line in f:
a = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a = line[: line.index(""": """ )]
a = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
a = line[len("""FAILED """ ) :]
failed_tests.append(__lowerCamelCase )
elif filename == "job_name.txt":
a = line
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(
f'`errors` and `failed_tests` should have the same number of elements. Got {len(__lowerCamelCase )} for `errors` '
f'and {len(__lowerCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
""" problem.""" )
a = None
if job_name and job_links:
a = job_links.get(__lowerCamelCase , __lowerCamelCase )
# A list with elements of the form (line of error, error, failed test)
a = [x + [y] + [job_link] for x, y in zip(__lowerCamelCase , __lowerCamelCase )]
return result
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Dict:
a = []
a = [os.path.join(__lowerCamelCase , __lowerCamelCase ) for p in os.listdir(__lowerCamelCase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowerCamelCase , job_links=__lowerCamelCase ) )
return errors
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Tuple:
a = Counter()
counter.update([x[1] for x in logs] )
a = counter.most_common()
a = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
a = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__lowerCamelCase ) )
return r
def __A ( __lowerCamelCase ) -> List[str]:
a = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
a = test.split("""/""" )[2]
else:
a = None
return test
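# For illustration: a failed test id such as "tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward"
# keeps only the file path before "::", and since that path lives under "tests/models/" the extracted model is "bert".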
def __A ( __lowerCamelCase , __lowerCamelCase=None ) -> Any:
a = [(x[0], x[1], get_model(x[2] )) for x in logs]
a = [x for x in logs if x[2] is not None]
a = {x[2] for x in logs}
a = {}
for test in tests:
a = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a = counter.most_common()
a = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a = sum(error_counts.values() )
if n_errors > 0:
a = {"""count""": n_errors, """errors""": error_counts}
a = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=__lowerCamelCase ) )
return r
def __A ( __lowerCamelCase ) -> Optional[int]:
a = """| no. | error | status |"""
a = """|-:|:-|:-|"""
a = [header, sep]
for error in reduced_by_error:
a = reduced_by_error[error]["""count"""]
a = f'| {count} | {error[:100]} | |'
lines.append(__lowerCamelCase )
return "\n".join(__lowerCamelCase )
def __A ( __lowerCamelCase ) -> int:
a = """| model | no. of errors | major error | count |"""
a = """|-:|-:|-:|-:|"""
a = [header, sep]
for model in reduced_by_model:
a = reduced_by_model[model]["""count"""]
a , a = list(reduced_by_model[model]["""errors"""].items() )[0]
a = f'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(__lowerCamelCase )
return "\n".join(__lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__UpperCamelCase : Any = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCamelCase : Optional[Any] = get_job_links(args.workflow_run_id, token=args.token)
__UpperCamelCase : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCamelCase : List[str] = k.find(" / ")
__UpperCamelCase : List[Any] = k[index + len(" / ") :]
__UpperCamelCase : List[str] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCamelCase : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCamelCase : int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCamelCase : Optional[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCamelCase : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCamelCase : Union[str, Any] = reduce_by_error(errors)
__UpperCamelCase : Dict = reduce_by_model(errors)
__UpperCamelCase : Union[str, Any] = make_github_table(reduced_by_error)
__UpperCamelCase : Union[str, Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 468
| 1
|
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
SCREAMING_SNAKE_CASE__ = str(bin(_A ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ = str(bin(_A ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ = max(len(_A ) , len(_A ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(_A ) , b_binary.zfill(_A ) ) )
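# Worked examples (computed by hand): with a=25 (0b11001) and b=32 (0b100000) the result is "0b000000"
# because 25 & 32 == 0; with a=37 and b=50 the result is "0b100000" because 37 & 50 == 32.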
if __name__ == "__main__":
import doctest
doctest.testmod()
| 472
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
def UpperCAmelCase_ ( _A , _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = original_name.split('''.''' )[0]
SCREAMING_SNAKE_CASE__ = key.split('''.''' )
SCREAMING_SNAKE_CASE__ = int(key_list[key_list.index(_A ) - 2] )
SCREAMING_SNAKE_CASE__ = int(key_list[key_list.index(_A ) - 1] )
SCREAMING_SNAKE_CASE__ = orig_block_num - offset
SCREAMING_SNAKE_CASE__ = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
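# For illustration (hypothetical key): replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1,
# "mlp.fc1", "output.conv1") would return "poolformer.encoder.block.1.0.output.conv1.weight".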
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = OrderedDict()
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
SCREAMING_SNAKE_CASE__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
SCREAMING_SNAKE_CASE__ = key[: key.find('''proj''' )]
SCREAMING_SNAKE_CASE__ = key.replace(_A , F'''patch_embeddings.{total_embed_found}.''' )
SCREAMING_SNAKE_CASE__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
SCREAMING_SNAKE_CASE__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(_A , _A , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(_A , _A , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(_A , _A , '''norm1''' , '''before_norm''' )
if "norm2" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(_A , _A , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(_A , _A , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(_A , _A , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
SCREAMING_SNAKE_CASE__ = key.replace('''head''' , '''classifier''' )
SCREAMING_SNAKE_CASE__ = value
return new_state_dict
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(_A , stream=_A ).raw )
return image
@torch.no_grad()
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = PoolFormerConfig()
# set attributes based on model_name
SCREAMING_SNAKE_CASE__ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ = model_name[-3:]
SCREAMING_SNAKE_CASE__ = 10_00
SCREAMING_SNAKE_CASE__ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ = (1, 10_00)
# set config attributes
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ = {int(_A ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
if size == "s12":
SCREAMING_SNAKE_CASE__ = [2, 2, 6, 2]
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s24":
SCREAMING_SNAKE_CASE__ = [4, 4, 12, 4]
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "m36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [96, 1_92, 3_84, 7_68]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
elif size == "m48":
SCREAMING_SNAKE_CASE__ = [8, 8, 24, 8]
SCREAMING_SNAKE_CASE__ = [96, 1_92, 3_84, 7_68]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
SCREAMING_SNAKE_CASE__ = PoolFormerImageProcessor(crop_pct=_A )
# Prepare image
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=_A , return_tensors='''pt''' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location=torch.device('''cpu''' ) )
# rename keys
SCREAMING_SNAKE_CASE__ = rename_keys(_A )
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE__ = PoolFormerForImageClassification(_A )
model.load_state_dict(_A )
model.eval()
# Define image processor
SCREAMING_SNAKE_CASE__ = PoolFormerImageProcessor(crop_pct=_A )
SCREAMING_SNAKE_CASE__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
SCREAMING_SNAKE_CASE__ = model(_A )
SCREAMING_SNAKE_CASE__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _A , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 472
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __lowerCamelCase ( A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
UpperCamelCase = 1_024
UpperCamelCase = 4_096
UpperCamelCase = 24
UpperCamelCase = 16
UpperCamelCase = [5, 11, 17, 23]
UpperCamelCase = [256, 512, 1_024, 1_024]
UpperCamelCase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCamelCase = 768
UpperCamelCase = [1, 1, 1, 0.5]
UpperCamelCase = [256, 512, 768, 768]
UpperCamelCase = 150
UpperCamelCase = 16
UpperCamelCase = (1, 384, 384)
UpperCamelCase = False
UpperCamelCase = 'project'
if "ade" in checkpoint_url:
UpperCamelCase = True
UpperCamelCase = 768
UpperCamelCase = [1, 1, 1, 0.5]
UpperCamelCase = 150
UpperCamelCase = 16
UpperCamelCase = 'huggingface/label-files'
UpperCamelCase = 'ade20k-id2label.json'
UpperCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type='dataset' ) ) , 'r' ) )
UpperCamelCase = {int(A__ ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = [1, 150, 480, 480]
return config, expected_shape
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
UpperCamelCase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def __lowerCamelCase ( A__ ) -> List[Any]:
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCamelCase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
UpperCamelCase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
UpperCamelCase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
UpperCamelCase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
UpperCamelCase = name.replace('proj' , 'projection' )
if "blocks" in name:
UpperCamelCase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
UpperCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
UpperCamelCase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
UpperCamelCase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
UpperCamelCase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
UpperCamelCase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
UpperCamelCase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
UpperCamelCase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
UpperCamelCase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
UpperCamelCase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCamelCase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCamelCase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
UpperCamelCase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
UpperCamelCase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
UpperCamelCase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
UpperCamelCase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
UpperCamelCase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
UpperCamelCase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
UpperCamelCase = name.replace('bn' , 'batch_norm' )
if "head" in name:
UpperCamelCase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
UpperCamelCase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
UpperCamelCase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
UpperCamelCase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
UpperCamelCase = name.replace('..' , '.' )
if "stem.conv" in name:
UpperCamelCase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
UpperCamelCase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
UpperCamelCase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
UpperCamelCase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
UpperCamelCase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
UpperCamelCase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCamelCase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
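# For illustration: a timm key such as "pretrained.model.blocks.0.attn.proj.weight" is rewritten step by step
# above into "dpt.encoder.layer.0.attention.output.dense.weight".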
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCamelCase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[: config.hidden_size, :]
UpperCamelCase = in_proj_bias[: config.hidden_size]
UpperCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase = in_proj_bias[-config.hidden_size :]
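# For reference: with hidden size H, the original checkpoint stores the fused attention projection as one
# (3*H, H) weight and one (3*H,) bias per layer; the slices above assign rows [0, H) to the query,
# [H, 2*H) to the key and [2*H, 3*H) to the value projections.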
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCamelCase = torch.load(A__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
UpperCamelCase = DPTForSemanticSegmentation(A__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
UpperCamelCase = 480 if 'ade' in checkpoint_url else 384
UpperCamelCase = DPTImageProcessor(size=A__ )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(A__ , return_tensors='pt' )
# forward pass
UpperCamelCase = model(**A__ ).logits if 'ade' in checkpoint_url else model(**A__ ).predicted_depth
if show_prediction:
UpperCamelCase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=A__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(A__ ).mkdir(exist_ok=A__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_lowerCamelCase : int = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 430
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Linear(3 , 4 )
UpperCamelCase = nn.BatchNormad(4 )
UpperCamelCase = nn.Linear(4 , 5 )
def A ( self : str , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase__ ) ) )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , model.state_dict() )
UpperCamelCase = os.path.join(UpperCamelCase__ , 'index.json' )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
UpperCamelCase = os.path.join(UpperCamelCase__ , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# TODO: add tests on the fact weights are properly loaded
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
UpperCamelCase = torch.randn(2 , 3 , dtype=UpperCamelCase__ )
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = offload_weight(UpperCamelCase__ , 'weight' , UpperCamelCase__ , {} )
UpperCamelCase = os.path.join(UpperCamelCase__ , 'weight.dat' )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
self.assertDictEqual(UpperCamelCase__ , {'weight': {'shape': [2, 3], 'dtype': str(UpperCamelCase__ ).split('.' )[1]}} )
UpperCamelCase = load_offloaded_weight(UpperCamelCase__ , index['weight'] )
self.assertTrue(torch.equal(UpperCamelCase__ , UpperCamelCase__ ) )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = ModelForTest()
UpperCamelCase = model.state_dict()
UpperCamelCase = {k: v for k, v in state_dict.items() if 'linear2' not in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
UpperCamelCase = {k: v for k, v in state_dict.items() if 'weight' in k}
UpperCamelCase = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase__ , UpperCamelCase__ )
# Duplicates are removed
UpperCamelCase = OffloadedWeightsLoader(state_dict=UpperCamelCase__ , save_folder=UpperCamelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase__ , weight_map[key] ) )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = {'a.1': 0, 'a.10': 1, 'a.2': 2}
UpperCamelCase = extract_submodules_state_dict(UpperCamelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCamelCase__ , {'a.1': 0, 'a.2': 2} )
UpperCamelCase = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
UpperCamelCase = extract_submodules_state_dict(UpperCamelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCamelCase__ , {'a.1.a': 0, 'a.2.a': 2} )
| 430
| 1
|
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a__ ( SCREAMING_SNAKE_CASE : bytes , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : List[str] = f"""{sampling_rate}"""
lowerCAmelCase : str = "1"
lowerCAmelCase : int = "f32le"
lowerCAmelCase : List[str] = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowerCAmelCase : Any = ffmpeg_process.communicate(SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
lowerCAmelCase : Union[str, Any] = output_stream[0]
lowerCAmelCase : Tuple = np.frombuffer(SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
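# The helper above pipes the raw bytes of any audio file through ffmpeg (which must be installed) and returns a
# mono float32 waveform resampled to `sampling_rate`; e.g. feeding it the bytes of a .flac file with
# sampling_rate=16000 yields a 1-D np.float32 array at 16 kHz.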
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : str = "f32le" , ):
'''simple docstring'''
lowerCAmelCase : List[Any] = f"""{sampling_rate}"""
lowerCAmelCase : Tuple = "1"
if format_for_conversion == "s16le":
lowerCAmelCase : List[Any] = 2
elif format_for_conversion == "f32le":
lowerCAmelCase : List[str] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
lowerCAmelCase : Union[str, Any] = platform.system()
if system == "Linux":
lowerCAmelCase : Any = "alsa"
lowerCAmelCase : Optional[int] = "default"
elif system == "Darwin":
lowerCAmelCase : Union[str, Any] = "avfoundation"
lowerCAmelCase : int = ":0"
elif system == "Windows":
lowerCAmelCase : Any = "dshow"
lowerCAmelCase : Optional[Any] = "default"
lowerCAmelCase : Any = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
lowerCAmelCase : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCAmelCase : Optional[Any] = _ffmpeg_stream(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[Tuple[float, float], float]] = None , SCREAMING_SNAKE_CASE : str = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
lowerCAmelCase : Tuple = stream_chunk_s
else:
lowerCAmelCase : int = chunk_length_s
lowerCAmelCase : Union[str, Any] = ffmpeg_microphone(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , format_for_conversion=SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
lowerCAmelCase : List[Any] = np.intaa
lowerCAmelCase : Dict = 2
elif format_for_conversion == "f32le":
lowerCAmelCase : Optional[Any] = np.floataa
lowerCAmelCase : Optional[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
lowerCAmelCase : Dict = chunk_length_s / 6
lowerCAmelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(SCREAMING_SNAKE_CASE , (int, float) ):
lowerCAmelCase : str = [stride_length_s, stride_length_s]
lowerCAmelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCAmelCase : Union[str, Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCAmelCase : str = datetime.datetime.now()
lowerCAmelCase : Optional[Any] = datetime.timedelta(seconds=SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
lowerCAmelCase : str = np.frombuffer(item["raw"] , dtype=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
lowerCAmelCase : Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple[int, int] , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = B""
lowerCAmelCase , lowerCAmelCase : List[Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
lowerCAmelCase : Any = 0
for raw in iterator:
acc += raw
if stream and len(SCREAMING_SNAKE_CASE ) < chunk_len:
lowerCAmelCase : Tuple = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
lowerCAmelCase : str = (_stride_left, stride_right)
lowerCAmelCase : str = {"raw": acc[:chunk_len], "stride": stride}
if stream:
lowerCAmelCase : int = False
yield item
lowerCAmelCase : Dict = stride_left
lowerCAmelCase : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(SCREAMING_SNAKE_CASE ) > stride_left:
lowerCAmelCase : int = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
lowerCAmelCase : str = False
yield item
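# For illustration: with chunk_len=6 and stride=(2, 1), each yielded chunk after the first re-uses the final
# stride_left + stride_right = 3 bytes of the previous chunk, so a consumer can trim the strided borders and
# still cover the byte stream exactly once.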
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 2**2_4 # 16Mo
try:
with subprocess.Popen(SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
lowerCAmelCase : Optional[Any] = ffmpeg_process.stdout.read(SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 681
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 681
| 1
|
'''simple docstring'''
_snake_case : Any = 9.8_0665
def snake_case_ (UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
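# Worked example (approximate): an object of volume 0.0005 m^3 fully submerged in water (997 kg/m^3)
# experiences a buoyant force of about 997 * 9.80665 * 0.0005 ≈ 4.89 N.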
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 22
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCamelCase : Union[str, Any] = get_tests_dir('''fixtures''')
class lowercase ( unittest.TestCase ):
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 500
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_UpperCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowercase ( unittest.TestCase ):
@classmethod
def __snake_case( cls : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __snake_case( cls : Dict ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="test-feature-extractor" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __snake_case( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
| 403
| 0
|
class Graph:
    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight) -> None:
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self) -> None:
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self) -> None:
            self.parent = {}
            self.rank = {}

        def __len__(self) -> int:
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
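# A minimal usage sketch for the Boruvka MST implementation above. The graph, the
# edge weights and the expected printout are illustrative assumptions, not part of
# the original file.
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (1, 3, 4), (2, 3, 3)])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)  # prints the MST adjacency as "head -> tail == weight" lines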
| 371
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
return data
if __name__ == "__main__":
_UpperCAmelCase = [0, 1, 2, 3, 4, 5, 6, 7]
_UpperCAmelCase = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 371
| 1
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = np.full((len(lowerCamelCase__ ), sequence_length, 2) , lowerCamelCase__ )
else:
A_ : List[str] = np.full((len(lowerCamelCase__ ), sequence_length) , lowerCamelCase__ )
for i, tensor in enumerate(lowerCamelCase__ ):
if padding_side == "right":
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = tensor[:sequence_length]
else:
A_ : str = tensor[:sequence_length]
else:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : List[str] = tensor[:sequence_length]
else:
A_ : Tuple = tensor[:sequence_length]
return out_tensor.tolist()
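# Hedged worked example for the padding helper above. Several of its intermediate
# assignments lost their left-hand names, so this shows the intended behaviour of the
# original helper (assumed parameters: sequences, padding_value, padding_side,
# sequence_length), not the code exactly as printed:
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]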
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = ord(lowerCamelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
A_ : int = unicodedata.category(lowerCamelCase__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase
__SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : int = -100
__SCREAMING_SNAKE_CASE : str = "pt"
def _a (self , lowercase ):
import torch
A_ : str = """label""" if """label""" in features[0].keys() else """labels"""
A_ : str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
A_ : str = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
A_ : int = torch.tensor(batch["""entity_ids"""] ).shape[1]
A_ : Tuple = self.tokenizer.padding_side
if padding_side == "right":
A_ : Tuple = [
list(lowercase ) + [self.label_pad_token_id] * (sequence_length - len(lowercase )) for label in labels
]
else:
A_ : int = [
[self.label_pad_token_id] * (sequence_length - len(lowercase )) + list(lowercase ) for label in labels
]
A_ : Dict = [feature["""ner_tags"""] for feature in features]
A_ : Union[str, Any] = padding_tensor(lowercase , -1 , lowercase , lowercase )
A_ : Tuple = [feature["""original_entity_spans"""] for feature in features]
A_ : Optional[int] = padding_tensor(lowercase , (-1, -1) , lowercase , lowercase )
A_ : Any = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
return batch
| 667
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = image_size
A_ : List[str] = patch_size
A_ : Tuple = num_channels
A_ : List[str] = max_token_length
A_ : int = num_character_labels
A_ : str = num_bpe_labels
A_ : Tuple = num_wordpiece_labels
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Tuple = mlp_ratio
A_ : str = distilled
A_ : Union[str, Any] = layer_norm_eps
A_ : str = drop_rate
A_ : int = qkv_bias
A_ : Dict = attn_drop_rate
A_ : List[Any] = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Union[str, Any] = initializer_range
| 667
| 1
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def _snake_case ( *_snake_case , **_snake_case) -> str:
pass
def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
_lowerCamelCase : Tuple= MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> Dict:
UpperCAmelCase_ : Optional[int] = DepthEstimationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _snake_case ( self , _snake_case , _snake_case) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
self.assertEqual({'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)} , __UpperCamelCase)
import datasets
UpperCAmelCase_ : int = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test')
UpperCAmelCase_ : Tuple = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
])
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
{'predicted_depth': ANY(torch.Tensor), 'depth': ANY(Image.Image)},
] , __UpperCamelCase , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF')
def _snake_case ( self) -> str:
pass
@slow
@require_torch
def _snake_case ( self) -> Any:
UpperCAmelCase_ : Any = 'Intel/dpt-large'
UpperCAmelCase_ : int = pipeline('depth-estimation' , model=__UpperCamelCase)
UpperCAmelCase_ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
UpperCAmelCase_ : Optional[Any] = hashimage(outputs['depth'])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item()) , 29.304)
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item()) , 2.662)
@require_torch
def _snake_case ( self) -> int:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT')
| 715
|
'''simple docstring'''
import math
class lowercase :
def __init__( self , _snake_case=0) -> Union[str, Any]: # a graph with Node 0,1,...,N-1
UpperCAmelCase_ : Tuple = n
UpperCAmelCase_ : Optional[Any] = [
[math.inf for j in range(0 , _snake_case)] for i in range(0 , _snake_case)
] # adjacency matrix for weight
UpperCAmelCase_ : Tuple = [
[math.inf for j in range(0 , _snake_case)] for i in range(0 , _snake_case)
] # dp[i][j] stores minimum distance from i to j
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = w
def _snake_case ( self) -> str:
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
UpperCAmelCase_ : Optional[int] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def _snake_case ( self , _snake_case , _snake_case) -> str:
return self.dp[u][v]
if __name__ == "__main__":
lowerCAmelCase__ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
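# Hedged sanity check for the Floyd-Warshall example above: with the adjacency and dp
# updates restored to their original left-hand names, the all-pairs distances for this
# graph give show_min(1, 4) == 11 (via 1 -> 3 -> 4) and show_min(0, 3) == 16
# (via 0 -> 2 -> 3). The calls above return these values; wrap them in print(...) to see them.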
| 471
| 0
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
| 43
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=1_8 , UpperCamelCase__=3_0 , UpperCamelCase__=4_0_0 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 1_8}
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( _A , unittest.TestCase ):
a_ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCamelCase_ ( self ) -> int:
"""simple docstring"""
pass
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 660
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=snake_case__ )
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
A = Features({'image': Image()} )
A = Features({'labels': ClassLabel} )
A = "image"
A = "labels"
def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :str ) -> Union[str, Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCamelCase_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowerCamelCase__ ( self :Dict ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 304
|
"""simple docstring"""
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
UpperCamelCase__ = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCamelCase__ = ""
UpperCamelCase__ = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(_snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
UpperCamelCase__ , UpperCamelCase__ = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCamelCase__ = [1 for i in range(len(_snake_case ) )]
# for each character in new_string find corresponding palindromic string
UpperCamelCase__ = 0
for j in range(len(_snake_case ) ):
UpperCamelCase__ = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(_snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCamelCase__ = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
UpperCamelCase__ = j - k + 1 # noqa: E741
UpperCamelCase__ = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCamelCase__ = length[j]
UpperCamelCase__ = j
# create that string
UpperCamelCase__ = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
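# Hedged usage sketch for the longest-palindromic-substring helper above (Manacher's
# algorithm). Several intermediate assignments lost their left-hand names, so this
# shows the behaviour expected of the original implementation rather than of the code
# exactly as printed:
#   snake_case__("abababa")          -> "abababa"
#   snake_case__("forgeeksskeegfor") -> "geeksskeeg"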
| 304
| 1
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar("""KEY""")
UpperCAmelCase_ = TypeVar("""VAL""")
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class UpperCamelCase__ ( Generic[KEY, VAL] ):
'''simple docstring'''
__a : KEY
__a : VAL
class UpperCamelCase__ ( _Item ):
'''simple docstring'''
def __init__( self ) -> None:
"""simple docstring"""
super().__init__(snake_case__, snake_case__ )
def __bool__( self ) -> bool:
"""simple docstring"""
return False
UpperCAmelCase_ = _DeletedItem()
class UpperCamelCase__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self, snake_case__ = 8, snake_case__ = 0.75 ) -> None:
"""simple docstring"""
lowercase_ : Dict = initial_block_size
lowercase_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowercase_ : Optional[int] = capacity_factor
lowercase_ : Union[str, Any] = 0
def snake_case__ ( self, snake_case__ ) -> int:
"""simple docstring"""
return hash(snake_case__ ) % len(self._buckets )
def snake_case__ ( self, snake_case__ ) -> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ ) -> bool:
"""simple docstring"""
lowercase_ : Optional[int] = self._buckets[ind]
if not stored:
lowercase_ : List[str] = _Item(snake_case__, snake_case__ )
self._len += 1
return True
elif stored.key == key:
lowercase_ : Tuple = _Item(snake_case__, snake_case__ )
return True
else:
return False
def snake_case__ ( self ) -> bool:
"""simple docstring"""
lowercase_ : Optional[int] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(snake_case__ )
def snake_case__ ( self ) -> bool:
"""simple docstring"""
if len(self._buckets ) <= self._initial_block_size:
return False
lowercase_ : str = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def snake_case__ ( self, snake_case__ ) -> None:
"""simple docstring"""
lowercase_ : List[str] = self._buckets
lowercase_ : int = [None] * new_size
lowercase_ : str = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def snake_case__ ( self ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) * 2 )
def snake_case__ ( self ) -> None:
"""simple docstring"""
self._resize(len(self._buckets ) // 2 )
def snake_case__ ( self, snake_case__ ) -> Iterator[int]:
"""simple docstring"""
lowercase_ : Optional[Any] = self._get_bucket_index(snake_case__ )
for _ in range(len(self._buckets ) ):
yield ind
lowercase_ : int = self._get_next_ind(snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__ ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(snake_case__ ):
if self._try_set(snake_case__, snake_case__, snake_case__ ):
break
def __setitem__( self, snake_case__, snake_case__ ) -> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(snake_case__, snake_case__ )
def __delitem__( self, snake_case__ ) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(snake_case__ ):
lowercase_ : Optional[int] = self._buckets[ind]
if item is None:
raise KeyError(snake_case__ )
if item is _deleted:
continue
if item.key == key:
lowercase_ : Dict = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self, snake_case__ ) -> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(snake_case__ ):
lowercase_ : Optional[int] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(snake_case__ )
def __len__( self ) -> int:
"""simple docstring"""
return self._len
def __iter__( self ) -> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
"""simple docstring"""
lowercase_ : Dict = """ ,""".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 458
|
from __future__ import annotations
from math import gcd
def __magic_name__ ( lowercase , lowercase = 2 , lowercase = 1 , lowercase = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowercase , lowercase , lowercase ) -> int:
return (pow(lowercase , 2 ) + step) % modulus
for _ in range(lowercase ):
# These track the position within the cycle detection logic.
lowercase_ : List[Any] = seed
lowercase_ : Any = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowercase_ : List[str] = rand_fn(lowercase , lowercase , lowercase )
lowercase_ : str = rand_fn(lowercase , lowercase , lowercase )
lowercase_ : Any = rand_fn(lowercase , lowercase , lowercase )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowercase_ : Tuple = gcd(hare - tortoise , lowercase )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowercase_ : int = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
UpperCAmelCase_ = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
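# Quick illustrative checks for the Pollard's rho helper above (named __magic_name__
# here). The even-input shortcut follows directly from the code shown; the second call
# assumes the loop body behaves as in the standard algorithm:
#   __magic_name__(100)    # -> 2 (even inputs are handled by the explicit check)
#   __magic_name__(10403)  # -> a nontrivial factor of 10403 = 101 * 103, provided the
#                          #    deterministic seeds find one within the 3 attempts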
| 458
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowercase ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = '''gpt_neo'''
UpperCAmelCase_ : str = ['''past_key_values''']
UpperCAmelCase_ : List[str] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , __UpperCAmelCase=5_02_57 , __UpperCAmelCase=20_48 , __UpperCAmelCase=20_48 , __UpperCAmelCase=24 , __UpperCAmelCase=[[["global", "local"], 12]] , __UpperCAmelCase=16 , __UpperCAmelCase=None , __UpperCAmelCase=2_56 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=5_02_56 , __UpperCAmelCase=5_02_56 , **__UpperCAmelCase , ) -> Dict:
A : Tuple = vocab_size
A : Tuple = max_position_embeddings
A : List[Any] = hidden_size
A : int = num_layers
A : Dict = num_heads
A : Optional[int] = intermediate_size
A : List[Any] = window_size
A : Optional[Any] = activation_function
A : str = resid_dropout
A : str = embed_dropout
A : Union[str, Any] = attention_dropout
A : Any = classifier_dropout
A : Tuple = layer_norm_epsilon
A : Any = initializer_range
A : Optional[int] = use_cache
A : List[str] = bos_token_id
A : int = eos_token_id
A : Any = attention_types
A : Tuple = self.expand_attention_types_params(__a )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
f'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
@staticmethod
def snake_case ( __UpperCAmelCase ) -> Tuple:
A : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
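# Worked example for the attention-type expansion above: each [types, num_layers_for_block]
# pair is repeated that many times, so with a hypothetical 4-layer configuration
#   [[["global", "local"], 2]]  expands to  ["global", "local", "global", "local"],
# and the result must contain exactly num_layers entries or the config raises a ValueError.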
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
import torch
A : str = input.size()
A : str = len(lowerCamelCase_ )
A : List[Any] = shape[dimension]
A : int = torch.arange(0 , lowerCamelCase_ , lowerCamelCase_ )
A : List[Any] = torch.div(sizedim - size , lowerCamelCase_ , rounding_mode='''floor''' ) + 1
A : Dict = torch.arange(lowerCamelCase_ ) + low_indices[:min_length][:, None]
A : int = [slice(lowerCamelCase_ )] * rank
A : Dict = indices
A : List[str] = input[s]
A : Tuple = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase_ )
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
import torch
A : Tuple = torch.arange(1 , lowerCamelCase_ )
A : Tuple = torch.remainder(lowerCamelCase_ , lowerCamelCase_ )
A : Optional[Any] = remainders == 0
A : Optional[int] = candidates[divisor_indices]
A : Tuple = torch.max(lowerCamelCase_ )
return largest_divisor, torch.div(lowerCamelCase_ , lowerCamelCase_ , rounding_mode='''floor''' )
class __lowercase ( UpperCamelCase_ ):
"""simple docstring"""
@property
def snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
A : str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(__a , direction='''inputs''' )
A : List[str] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
A : Dict = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def snake_case ( self ) -> int:
return self._config.num_heads
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]:
A : int = super(__a , self ).generate_dummy_inputs(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
# We need to order the input in the way they appears in the forward()
A : str = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A : Optional[Any] = seqlen + 2
A : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : List[Any] = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers )
]
A : Any = common_inputs['attention_mask']
if self.use_past:
A : Any = ordered_inputs['attention_mask'].dtype
A : Optional[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(__a , __a , dtype=__a )] , dim=1 )
return ordered_inputs
@property
def snake_case ( self ) -> int:
return 13
| 717
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Optional[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __lowercase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
A : List[Any] = num_channels
A : Optional[Any] = patch_sizes
A : Optional[int] = patch_stride
A : Union[str, Any] = patch_padding
A : Union[str, Any] = embed_dim
A : Optional[Any] = num_heads
A : Any = depth
A : Optional[int] = mlp_ratio
A : Optional[int] = attention_drop_rate
A : Optional[int] = drop_rate
A : Tuple = drop_path_rate
A : Tuple = qkv_bias
A : Dict = cls_token
A : List[Any] = qkv_projection_method
A : Union[str, Any] = kernel_qkv
A : Union[str, Any] = padding_kv
A : Union[str, Any] = stride_kv
A : str = padding_q
A : Union[str, Any] = stride_q
A : Dict = initializer_range
A : List[str] = layer_norm_eps
| 423
| 0
|