# [Flattened dataset-viewer header. Columns: code (string, 86 to 54.5k chars),
#  code_codestyle (int64, 0 to 371), style_context (string, 87 to 49.2k chars),
#  style_context_codestyle (int64, 0 to 349), label (int64, 0 to 1).
#  Each code sample below is followed by an inline metadata marker.]
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in ``n``."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
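# A minimal alternative sketch of the same search (not part of the original
# file; assumes N and str_eval as defined above): score every 13-digit window
# directly instead of maintaining a sliding substring.
def solution_bruteforce(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))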
# [code_codestyle: 65]
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : str = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json")
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token)
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
# [style_context_codestyle: 120 | label: 0]
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
# [code_codestyle: 363]
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
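# Example of the URL shape being asserted, with illustrative parameter values
# (not part of the original test); quote() percent-encodes the blank in the filename:
# hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
# -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"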
# [style_context_codestyle: 303 | label: 0]
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
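# Usage sketch (not in the original file): number of ways to make 200 pence
# from 1p, 2p, 5p, 10p, 20p, 50p, 100p and 200p coins (Project Euler 31).
# >>> solution(200)
# 73682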
# [code_codestyle: 4]
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
# [style_context_codestyle: 4 | label: 1]
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A : Optional[int] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# [code_codestyle: 89]
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode audio bytes with ffmpeg into a float32 numpy array."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def UpperCamelCase_ ( A__ : int , A__ : float , A__ : str = "f32le" , ):
'''simple docstring'''
lowerCAmelCase_ : int = f'{sampling_rate}'
lowerCAmelCase_ : Any = """1"""
if format_for_conversion == "s16le":
lowerCAmelCase_ : Optional[Any] = 2
elif format_for_conversion == "f32le":
lowerCAmelCase_ : Union[str, Any] = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
lowerCAmelCase_ : int = platform.system()
if system == "Linux":
lowerCAmelCase_ : int = """alsa"""
lowerCAmelCase_ : int = """default"""
elif system == "Darwin":
lowerCAmelCase_ : List[str] = """avfoundation"""
lowerCAmelCase_ : Union[str, Any] = """:0"""
elif system == "Windows":
lowerCAmelCase_ : List[Any] = """dshow"""
lowerCAmelCase_ : Union[str, Any] = """default"""
lowerCAmelCase_ : Tuple = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
lowerCAmelCase_ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCAmelCase_ : List[str] = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def UpperCamelCase_ ( A__ : int , A__ : float , A__ : Optional[int] = None , A__ : Optional[Union[Tuple[float, float], float]] = None , A__ : str = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
lowerCAmelCase_ : Union[str, Any] = stream_chunk_s
else:
lowerCAmelCase_ : Tuple = chunk_length_s
lowerCAmelCase_ : List[Any] = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
lowerCAmelCase_ : Tuple = np.intaa
lowerCAmelCase_ : List[Any] = 2
elif format_for_conversion == "f32le":
lowerCAmelCase_ : Dict = np.floataa
lowerCAmelCase_ : int = 4
else:
raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
lowerCAmelCase_ : Optional[Any] = chunk_length_s / 6
lowerCAmelCase_ : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
lowerCAmelCase_ : int = [stride_length_s, stride_length_s]
lowerCAmelCase_ : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowerCAmelCase_ : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowerCAmelCase_ : Dict = datetime.datetime.now()
lowerCAmelCase_ : Any = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
lowerCAmelCase_ : Optional[int] = np.frombuffer(item["""raw"""] , dtype=A__ )
lowerCAmelCase_ : Dict = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
lowerCAmelCase_ : Dict = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Chunk raw bytes from an iterator into overlapping windows of chunk_len."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal generator reading fixed-size buffers from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
# [style_context_codestyle: 89 | label: 1]
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
# [code_codestyle: 281]
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( _snake_case : str = "laptop" ) -> DataFrame:
'''simple docstring'''
__magic_name__ : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
__magic_name__ : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__magic_name__ : Tuple = BeautifulSoup(requests.get(_snake_case , headers=_snake_case ).text )
# Initialize a Pandas dataframe with the column titles
__magic_name__ : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
__magic_name__ : Dict = item.ha.text
__magic_name__ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
__magic_name__ : Optional[Any] = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
__magic_name__ : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
__magic_name__ : Dict = "Not available"
try:
__magic_name__ : Optional[int] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
__magic_name__ : List[str] = ""
try:
__magic_name__ : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
__magic_name__ : str = float("nan" )
except AttributeError:
pass
__magic_name__ : Optional[int] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__magic_name__ : Optional[Any] = " "
__magic_name__ : str = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
snake_case : Any = "headphones"
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
# [style_context_codestyle: 281 | label: 1]
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: pick items by descending value/weight ratio,
    taking a fraction of the first item that no longer fits."""
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
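# Usage sketch with illustrative values (not from the original file): the item
# with the best value/weight ratio is taken first, the last one fractionally.
# >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
# (240.0, [1, 1, 0.6666666666666666])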
# [code_codestyle: 368]
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
# [style_context_codestyle: 344 | label: 0]
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# [code_codestyle: 107]
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.")
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.")
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset.")
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.")
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.")
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.")
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
__a : Optional[Any] = parse_args()
main(args)
# [style_context_codestyle: 210 | label: 0]
def lowerCAmelCase__ ( lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" ,):
_A : int = set()
# Replace all the whitespace in our sentence
_A : Tuple = input_str.replace(' ' ,'' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowerCamelCase ) == 26
def lowerCAmelCase__ ( lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" ,):
_A : List[Any] = [False] * 26
for char in input_str:
if char.islower():
_A : Optional[Any] = True
elif char.isupper():
_A : int = True
return all(lowerCamelCase )
def lowerCAmelCase__ ( lowerCamelCase : str = "The quick brown fox jumps over the lazy dog" ,):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def lowerCAmelCase__ ( ):
from timeit import timeit
_A : Tuple = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' ,setup=lowerCamelCase ) )
print(timeit('is_pangram_faster()' ,setup=lowerCamelCase ) )
print(timeit('is_pangram_fastest()' ,setup=lowerCamelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
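# Usage sketch (assumes the functions above; example strings are illustrative):
# >>> is_pangram("The quick brown fox jumps over the lazy dog")
# True
# >>> is_pangram_fastest("Waltz, bad nymph, for quick jigs vex")
# True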
# [code_codestyle: 227]
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference)

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference)
# [style_context_codestyle: 227 | label: 1]
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( _A : Union[str, Any] , _A : Tuple , _A : str , _A : Optional[Any]="attention" ) ->str:
"""simple docstring"""
lowerCamelCase_ =params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
lowerCamelCase_ =params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
lowerCamelCase_ =params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
lowerCamelCase_ =params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def tax_mlp_lookup ( params : Optional[Any] , i : Any , prefix : Dict , split_mlp_wi : Dict=False ) ->Optional[Any]:
"""simple docstring"""
if split_mlp_wi:
lowerCamelCase_ =params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
lowerCamelCase_ =params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
lowerCamelCase_ =(wi_a, wi_a)
else:
lowerCamelCase_ =params[f'{prefix}/layers_{i}/mlp/wi/kernel']
lowerCamelCase_ =params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def tax_layer_norm_lookup ( params : str , i : Optional[int] , prefix : List[Any] , layer_name : Tuple ) ->int:
"""simple docstring"""
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_tax_to_pytorch ( variables : Union[str, Any] , *, num_layers : List[Any] , is_encoder_only : Any ) ->List[str]:
    """simple docstring"""
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print("""Split MLP:""" , split_mlp_wi )
lowerCamelCase_ =collections.OrderedDict()
# Shared embeddings.
lowerCamelCase_ =old['''token_embedder/embedding''']
# Encoder.
    for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
lowerCamelCase_ =tax_layer_norm_lookup(_A , _A , """encoder""" , """pre_attention_layer_norm""" )
lowerCamelCase_ =tax_attention_lookup(_A , _A , """encoder""" , """attention""" )
lowerCamelCase_ =layer_norm
lowerCamelCase_ =k.T
lowerCamelCase_ =o.T
lowerCamelCase_ =q.T
lowerCamelCase_ =v.T
# Block i, layer 1 (MLP).
lowerCamelCase_ =tax_layer_norm_lookup(_A , _A , """encoder""" , """pre_mlp_layer_norm""" )
lowerCamelCase_ =tax_mlp_lookup(_A , _A , """encoder""" , _A )
lowerCamelCase_ =layer_norm
if split_mlp_wi:
lowerCamelCase_ =wi[0].T
lowerCamelCase_ =wi[1].T
else:
lowerCamelCase_ =wi.T
lowerCamelCase_ =wo.T
lowerCamelCase_ =old[
'''encoder/relpos_bias/rel_embedding'''
].T
lowerCamelCase_ =old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
        for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
lowerCamelCase_ =tax_layer_norm_lookup(_A , _A , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCamelCase_ =tax_attention_lookup(_A , _A , """decoder""" , """self_attention""" )
lowerCamelCase_ =layer_norm
lowerCamelCase_ =k.T
lowerCamelCase_ =o.T
lowerCamelCase_ =q.T
lowerCamelCase_ =v.T
# Block i, layer 1 (Cross Attention).
lowerCamelCase_ =tax_layer_norm_lookup(_A , _A , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCamelCase_ =tax_attention_lookup(_A , _A , """decoder""" , """encoder_decoder_attention""" )
lowerCamelCase_ =layer_norm
lowerCamelCase_ =k.T
lowerCamelCase_ =o.T
lowerCamelCase_ =q.T
lowerCamelCase_ =v.T
# Block i, layer 2 (MLP).
lowerCamelCase_ =tax_layer_norm_lookup(_A , _A , """decoder""" , """pre_mlp_layer_norm""" )
lowerCamelCase_ =tax_mlp_lookup(_A , _A , """decoder""" , _A )
lowerCamelCase_ =layer_norm
if split_mlp_wi:
lowerCamelCase_ =wi[0].T
lowerCamelCase_ =wi[1].T
else:
lowerCamelCase_ =wi.T
lowerCamelCase_ =wo.T
lowerCamelCase_ =old['''decoder/decoder_norm/scale''']
lowerCamelCase_ =old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCamelCase_ =old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict ( converted_params : str , is_encoder_only : List[str] ) ->Optional[int]:
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
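    # The .copy() matters: T5X checkpoints load as (often read-only) numpy arrays, and
    # torch.from_numpy would otherwise share that non-writable buffer instead of owning it.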
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCamelCase_ =state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCamelCase_ =state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCamelCase_ =state_dict['''shared.weight''']
return state_dict
def load_tax_weights_in_ta ( model : Union[str, Any] , config : str , tax_checkpoint_path : List[str] , is_encoder_only : Optional[Any] ) ->Optional[int]:
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch ( tax_checkpoint_path : Optional[int] , config_file : Optional[int] , pytorch_dump_path : Union[str, Any] , is_encoder_only : int = False ) ->List[Any]:
    """simple docstring"""
    config = TaConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__A : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 154
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , not is_max , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , not is_max , scores , height ) , )
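# Worked example (values taken from main() below): with scores
# [90, 23, 6, 33, 21, 65, 123, 34423] and height log2(8) == 3, the depth-2 max layer
# yields 90, 33, 65, 34423, the depth-1 min layer yields 33 and 65, and the
# maximizing root returns 65.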
def main ( ) -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 239
|
"""simple docstring"""
def solution ( max_base : int = 10 , max_power : int = 22 ) -> int:
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
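# Why the defaults suffice: any base >= 10 makes base**power longer than `power` digits,
# and 9**22 has only 21 digits, so bases 1..9 with powers 1..21 cover every case.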
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }')
| 239
| 1
|
'''simple docstring'''
def cocktail_shaker_sort ( unsorted ) -> list:
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
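# Example (illustrative): cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5].
# The backward pass sinks small values left, the forward pass floats large values right,
# and the `swapped` flag lets an already-sorted list exit after a single double pass.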
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 89
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img ( ) -> List[Any]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 89
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict; the key projection carries no trained bias in
        # the original vision tower, so zeros fill the middle third of the fused qkv bias
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config( model_name : int , eos_token_id : int ):
lowercase_ : str = 3_64 if 'coco' in model_name else 2_24
lowercase_ : int = BlipaVisionConfig(image_size=__SCREAMING_SNAKE_CASE ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowercase_ : Tuple = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict()
elif "opt-6.7b" in model_name:
lowercase_ : Optional[int] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict()
elif "t5-xl" in model_name:
lowercase_ : int = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase_ : Dict = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
lowercase_ : List[Any] = BlipaConfig(vision_config=__SCREAMING_SNAKE_CASE , text_config=__SCREAMING_SNAKE_CASE )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name : Union[str, Any] , pytorch_dump_folder_path : Any=None , push_to_hub : Union[str, Any]=False ):
lowercase_ : Dict = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
lowercase_ : Any = tokenizer('\n' , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids[0]
lowercase_ : Any = get_blipa_config(__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = BlipaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : str = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
lowercase_ : Any = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowercase_ : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
lowercase_ : List[str] = load_model_and_preprocess(
name=__SCREAMING_SNAKE_CASE , model_type=__SCREAMING_SNAKE_CASE , is_eval=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE )
original_model.eval()
print('Done!' )
# update state dict keys
lowercase_ : Dict = original_model.state_dict()
lowercase_ : List[str] = create_rename_keys(__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
# read in qv biases
read_in_q_v_bias(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase_ : Tuple = load_demo_image()
lowercase_ : Any = vis_processors['eval'](__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE )
# create processor
lowercase_ : List[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__SCREAMING_SNAKE_CASE , image_std=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = BlipaProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# make sure processor creates exact same pixel values
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
original_model.to(__SCREAMING_SNAKE_CASE )
hf_model.to(__SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "opt" in model_name:
lowercase_ : Any = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
lowercase_ : str = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).logits
else:
lowercase_ : str = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
lowercase_ : int = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
lowercase_ : Optional[Any] = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase_ : Union[str, Any] = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__SCREAMING_SNAKE_CASE )
assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase_ : Optional[int] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__SCREAMING_SNAKE_CASE )
else:
# cast to same type
lowercase_ : List[Any] = logits.dtype
assert torch.allclose(original_logits.to(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowercase_ : Union[str, Any] = ''
lowercase_ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = original_model.generate({'image': original_pixel_values} )
lowercase_ : Any = hf_model.generate(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = input_ids.shape[1]
lowercase_ : Optional[int] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = [text.strip() for text in output_text]
print('HF generation:' , __SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE =[
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 363
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
# fmt: off
lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowercase_ : Tuple = {'unk_token': '<unk>'}
lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
lowercase_ : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase )
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 )
lowercase_ : Any = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = self.prepare_image_inputs()
lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' )
lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Dict = self.get_image_processor()
lowercase_ : List[Any] = self.get_tokenizer()
lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Dict = 'lower newer'
lowercase_ : Any = processor(text=__UpperCamelCase )
lowercase_ : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.get_image_processor()
lowercase_ : str = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : List[Any] = 'lower newer'
lowercase_ : str = self.prepare_image_inputs()
lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : Tuple = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = self.prepare_image_inputs()
lowercase_ : Optional[Any] = self.prepare_image_inputs()
lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : List[str] = self.get_image_processor()
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
| 321
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 343
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = range_bbox
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase = bbox[i, j, 3]
UpperCamelCase = bbox[i, j, 1]
UpperCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase = bbox[i, j, 2]
UpperCamelCase = bbox[i, j, 0]
UpperCamelCase = t
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = LiltModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ):
"""simple docstring"""
UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(
lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
        (
            UpperCamelCase ,
            UpperCamelCase ,
            UpperCamelCase ,
            UpperCamelCase ,
            UpperCamelCase ,
            UpperCamelCase ,
            UpperCamelCase ,
        ) = config_and_inputs
UpperCamelCase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ):
"""simple docstring"""
return True
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = LiltModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ )
UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ )
UpperCamelCase = torch.Size([1, 2, 768] )
UpperCamelCase = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
| 343
| 1
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , __lowercase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = load_tool('text-to-speech' )
self.tool.setup()
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = self.tool('hey' )
lowerCAmelCase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
        lowerCAmelCase__ = self.tool(text='hey' )  # same check, but passing the prompt as a keyword argument
lowerCAmelCase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 371
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122
| 0
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( A : int ) -> int:
    """simple docstring"""
    return sum(DIGIT_FACTORIAL[d] for d in str(A ) )
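# Example: 145 == 1! + 4! + 5! == 1 + 24 + 120, so sum_of_digit_factorial(145) == 145.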
def solution( ) -> int:
    """simple docstring"""
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
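# Why 7 * factorial(9) bounds the search: a 7-digit number's digit-factorial sum is at
# most 7 * 9! == 2540160, while any 8-digit number is >= 10**7 > 8 * 9! == 2903040.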
if __name__ == "__main__":
print(F"""{solution() = }""")
| 227
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs( model : Tuple ) -> Optional[Any]:
"""simple docstring"""
a = model.config
a = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
a = MBartConfig(
is_decoder=A , is_encoder_decoder=A , add_cross_attention=A , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=A , add_final_layer_norm=A , )
return encoder_config, decoder_config
def rename_key( name : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if "encoder.model" in name:
a = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
a = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
a = "encoder." + name
if "attn.proj" in name:
a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
a = name.replace("attn" , "attention.self" )
if "norm1" in name:
a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
a = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
a = "encoder.layernorm.bias"
return name
def convert_state_dict( orig_state_dict : Union[str, Any] , model : Tuple ) -> List[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
a = key.split("." )
a = int(key_split[3] )
a = int(key_split[5] )
a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
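            # The fused qkv weight stacks the query, key and value projections along dim 0,
            # so each occupies one third of `dim`; the slices below pull them apart.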
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
a = val
return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"  # prompt appears without the leading "<" in the original conversion script
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensor = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensor, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensor).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
_lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowercase: Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
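# Example invocation (the script filename and paths are hypothetical; the flags
# match the parser above):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa \
#       --push_to_hub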
| 227
| 1
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__snake_case = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__snake_case = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__snake_case = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 112
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __lowerCAmelCase ( lowercase : list[float] ) -> Any:
"""simple docstring"""
return np.maximum(0 , lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 112
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
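# For illustration: a minimal standalone sketch of the lazy-import pattern used
# above. ``_LazyModule`` in transformers defers the submodule imports until an
# attribute is first accessed; this analogue (names are hypothetical, not the
# transformers implementation) shows the core idea with importlib.
import importlib
import types


class _LazySketchModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per attribute
        return value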
| 239
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 239
| 1
|
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 198
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 198
| 1
|
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__UpperCamelCase ) )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
# Base Case
if index == len(__UpperCamelCase ):
return True
# Recursive Step
for i in range(__UpperCamelCase ):
if valid_coloring(graph[index] , __UpperCamelCase , __UpperCamelCase ):
# Color current vertex
SCREAMING_SNAKE_CASE__ : Optional[int] = i
# Validate coloring
if util_color(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index + 1 ):
return True
# Backtrack
SCREAMING_SNAKE_CASE__ : Optional[int] = -1
return False
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [-1] * len(__UpperCamelCase )
if util_color(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , 0 ):
return colored_vertices
return []
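if __name__ == "__main__":
    # Small usage example (added for illustration): a triangle plus one pendant
    # vertex, given as an adjacency matrix, is 3-colorable.
    graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(color(graph, 3))  # -> [0, 1, 2, 0]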
| 132
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321
| 0
|
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

# Each entry stores the factor into cubic meters (``from_``) and the factor
# out of cubic meters (``to``).
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """
    Convert a volume from one unit to another via cubic meters.

    >>> volume_conversion(4, "cubicmeter", "litre")
    4000
    >>> volume_conversion(1, "litre", "gallon")
    0.264172
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 45
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 295
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 122
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 69
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 69
| 1
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 112
|
"""
Convert between Roman numerals and integers using a greedy value table.
"""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer, handling subtractive pairs.

    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(roman_to_int(key) == value for key, value in tests.items())
    True
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral greedily, largest value first.

    >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999}
    >>> all(int_to_roman(value) == key for key, value in tests.items())
    True
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 112
| 1
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def __lowerCAmelCase ()-> int:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_snake_case , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_snake_case , choices=['''exact''', '''compressed''', '''legacy'''] , type=_snake_case , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_snake_case , type=_snake_case , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_snake_case , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_snake_case , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_snake_case , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_snake_case , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_snake_case , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_snake_case , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_snake_case , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_snake_case , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_snake_case , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
snake_case_ = parser.parse_args()
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Union[str, Any]:
"""simple docstring"""
snake_case_ = {}
if args.model_type is None:
snake_case_ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
snake_case_ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
snake_case_ = args.n_docs
if args.index_name is not None:
snake_case_ = args.index_name
if args.index_path is not None:
snake_case_ = args.index_path
else:
snake_case_ = BartForConditionalGeneration
snake_case_ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _snake_case )
snake_case_ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
snake_case_ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_snake_case , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_snake_case ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
snake_case_ = RagRetriever.from_pretrained(_snake_case , **_snake_case )
snake_case_ = model_class.from_pretrained(_snake_case , retriever=_snake_case , **_snake_case )
model.retriever.init_retrieval()
else:
snake_case_ = model_class.from_pretrained(_snake_case , **_snake_case )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
snake_case_ = []
for line in tqdm(_snake_case ):
questions.append(line.strip() )
if len(_snake_case ) == args.eval_batch_size:
snake_case_ = evaluate_batch_fn(_snake_case , _snake_case , _snake_case )
preds_file.write('''\n'''.join(_snake_case ) + '''\n''' )
preds_file.flush()
snake_case_ = []
if len(_snake_case ) > 0:
snake_case_ = evaluate_batch_fn(_snake_case , _snake_case , _snake_case )
preds_file.write('''\n'''.join(_snake_case ) )
preds_file.flush()
score_fn(_snake_case , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCAmelCase = get_args()
main(args)
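# A minimal, self-contained sketch of the batching pattern the evaluation loop
# above uses: accumulate stripped lines until the batch is full, evaluate the
# batch, then handle the final partial batch. The names here (batched_eval_sketch,
# eval_fn) are hypothetical and only illustrate the control flow, not the real
# RAG evaluation API.
def batched_eval_sketch(lines, batch_size, eval_fn):
    questions, answers = [], []
    for line in lines:
        questions.append(line.strip())
        if len(questions) == batch_size:
            answers.extend(eval_fn(questions))  # full batch
            questions = []
    if questions:  # final partial batch
        answers.extend(eval_fn(questions))
    return answers

# Example with a stub eval_fn that just echoes its input.
assert batched_eval_sketch(["a", "b", "c"], 2, lambda qs: qs) == ["a", "b", "c"]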
| 352
|
from __future__ import annotations
import time
UpperCAmelCase = list[tuple[int, int]]
UpperCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = pos_x
snake_case_ = pos_y
snake_case_ = (pos_y, pos_x)
snake_case_ = goal_x
snake_case_ = goal_y
snake_case_ = parent
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = Node(start[1] , start[0] , goal[1] , goal[0] , _UpperCAmelCase )
snake_case_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , _UpperCAmelCase )
snake_case_ = [self.start]
snake_case_ = False
def UpperCamelCase__ ( self ):
while self.node_queue:
snake_case_ = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case_ = True
return self.retrace_path(_UpperCAmelCase )
snake_case_ = self.get_successors(_UpperCAmelCase )
for node in successors:
self.node_queue.append(_UpperCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = []
for action in delta:
snake_case_ = parent.pos_x + action[1]
snake_case_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_UpperCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_UpperCAmelCase , _UpperCAmelCase , self.target.pos_y , self.target.pos_x , _UpperCAmelCase ) )
return successors
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = node
snake_case_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case_ = current_node.parent
path.reverse()
return path
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = BreadthFirstSearch(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = BreadthFirstSearch(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = False
def UpperCamelCase__ ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case_ = self.fwd_bfs.node_queue.pop(0 )
snake_case_ = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case_ = True
return self.retrace_bidirectional_path(
_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = current_bwd_node
snake_case_ = current_fwd_node
snake_case_ = {
self.fwd_bfs: self.fwd_bfs.get_successors(_UpperCAmelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(_UpperCAmelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_UpperCAmelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.fwd_bfs.retrace_path(_UpperCAmelCase )
snake_case_ = self.bwd_bfs.retrace_path(_UpperCAmelCase )
bwd_path.pop()
bwd_path.reverse()
snake_case_ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
UpperCAmelCase = (0, 0)
UpperCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCAmelCase = time.time()
UpperCAmelCase = BreadthFirstSearch(init, goal)
UpperCAmelCase = bfs.search()
UpperCAmelCase = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
UpperCAmelCase = time.time()
UpperCAmelCase = BidirectionalBreadthFirstSearch(init, goal)
UpperCAmelCase = bd_bfs.search()
UpperCAmelCase = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 267
| 0
|
'''simple docstring'''
import requests
__a: Union[str, Any] = """""" # <-- Put your OpenWeatherMap appid here!
__a: Tuple = """https://api.openweathermap.org/data/2.5/"""
def __UpperCamelCase ( UpperCAmelCase = "Chicago" , UpperCAmelCase = APPID ):
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def __UpperCamelCase ( UpperCAmelCase = "Kolkata, India" , UpperCAmelCase = APPID ):
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def __UpperCamelCase ( UpperCAmelCase = 5_5.6_8 , UpperCAmelCase = 1_2.5_7 , UpperCAmelCase = APPID ):
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__a: List[str] = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
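# A hedged usage sketch: the helpers above return raw OpenWeatherMap JSON, so
# callers should check the status field before reading weather data. The
# "cod"/"message" keys follow the public API documentation and are an
# assumption here, not something the code above guarantees.
import requests

def safe_current_weather(location: str, appid: str) -> dict:
    response = requests.get(
        "https://api.openweathermap.org/data/2.5/weather",
        params={"q": location, "appid": appid},
        timeout=10,  # avoid hanging forever on a dead connection
    ).json()
    if str(response.get("cod")) != "200":
        raise RuntimeError(response.get("message", "unknown API error"))
    return response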
| 198
|
'''simple docstring'''
from __future__ import annotations
from random import random
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase = None ) -> str:
lowercase__ : Any = value
lowercase__ : Union[str, Any] = random()
lowercase__ : Node | None = None
lowercase__ : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
lowercase__ : Any = str(self.value ) + ''' '''
lowercase__ : str = str(self.left or '''''' )
lowercase__ : List[str] = str(self.right or '''''' )
return value + left + right
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase__ , lowercase__ : int = split(root.left , UpperCAmelCase )
return left, root
else:
lowercase__ , lowercase__ : Optional[Any] = split(root.right , UpperCAmelCase )
return root, right
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase__ : Any = merge(left.right , UpperCAmelCase )
return left
else:
lowercase__ : Dict = merge(UpperCAmelCase , right.left )
return right
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Dict = Node(UpperCAmelCase )
lowercase__ , lowercase__ : Tuple = split(UpperCAmelCase , UpperCAmelCase )
return merge(merge(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ , lowercase__ : Dict = split(UpperCAmelCase , value - 1 )
lowercase__ , lowercase__ : int = split(UpperCAmelCase , UpperCAmelCase )
return merge(UpperCAmelCase , UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
for arg in args.split():
if arg[0] == "+":
lowercase__ : Tuple = insert(UpperCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
lowercase__ : Union[str, Any] = erase(UpperCAmelCase , int(arg[1:] ) )
else:
print('''Unknown command''' )
return root
def __UpperCamelCase ( ):
lowercase__ : Tuple = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''' )
lowercase__ : Dict = input()
while args != "q":
lowercase__ : Any = interact_treap(UpperCAmelCase , UpperCAmelCase )
print(UpperCAmelCase )
lowercase__ : List[Any] = input()
    print('''good bye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
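# A compact, self-contained re-implementation of the split/merge treap above,
# with descriptive names, to make the invariants explicit: split() keeps the
# BST order on values and merge() keeps a heap order on the random priorities.
# (The code above keeps the *smaller* priority on top; this sketch keeps the
# larger, which is an equivalent convention.)
import random

class TreapNode:
    def __init__(self, value):
        self.value, self.prior = value, random.random()
        self.left = self.right = None

def treap_split(root, value):  # -> (keys < value, keys >= value)
    if root is None:
        return None, None
    if root.value < value:
        left, right = treap_split(root.right, value)
        root.right = left
        return root, right
    left, right = treap_split(root.left, value)
    root.left = right
    return left, root

def treap_merge(left, right):  # every key in `left` must precede `right`
    if left is None or right is None:
        return left or right
    if left.prior > right.prior:  # larger priority stays on top
        left.right = treap_merge(left.right, right)
        return left
    right.left = treap_merge(left, right.left)
    return right

def treap_insert(root, value):
    left, right = treap_split(root, value)
    return treap_merge(treap_merge(left, TreapNode(value)), right)

def treap_inorder(root, out):
    if root:
        treap_inorder(root.left, out)
        out.append(root.value)
        treap_inorder(root.right, out)

root = None
for v in [5, 1, 3, 4, 2]:
    root = treap_insert(root, v)
keys = []
treap_inorder(root, keys)
assert keys == [1, 2, 3, 4, 5]  # an inorder walk of a treap is sorted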
| 198
| 1
|
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 361
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCAmelCase_ = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , )  # J'aime le camembert !
UpperCAmelCase_ = model(__a )["last_hidden_state"]
UpperCAmelCase_ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __a )
# compare the actual values for a slice.
UpperCAmelCase_ = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 106
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowercase_ = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowercase_ = "UperNetConfig"
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 0 , _a = False , _a = 1 , ):
super().__init__()
__a = nn.Convad(
in_channels=_a , out_channels=_a , kernel_size=_a , padding=_a , bias=_a , dilation=_a , )
__a = nn.BatchNormad(_a )
__a = nn.ReLU()
def __UpperCAmelCase ( self , _a ):
__a = self.conv(_a )
__a = self.batch_norm(_a )
__a = self.activation(_a )
return output
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a ):
super().__init__()
__a = [
nn.AdaptiveAvgPoolad(_a ),
UperNetConvModule(_a , _a , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_a ) , _a )
def __UpperCAmelCase ( self , _a ):
__a = input
for layer in self.layers:
__a = layer(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a ):
super().__init__()
__a = pool_scales
__a = align_corners
__a = in_channels
__a = channels
__a = []
for i, pool_scale in enumerate(_a ):
__a = UperNetPyramidPoolingBlock(pool_scale=_a , in_channels=_a , channels=_a )
self.blocks.append(_a )
self.add_module(str(_a ) , _a )
def __UpperCAmelCase ( self , _a ):
__a = []
for ppm in self.blocks:
__a = ppm(_a )
__a = nn.functional.interpolate(
_a , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(_a )
return ppm_outs
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = config
__a = config.pool_scales # e.g. (1, 2, 3, 6)
__a = in_channels
__a = config.hidden_size
__a = False
__a = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__a = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__a = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__a = nn.ModuleList()
__a = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__a = UperNetConvModule(_a , self.channels , kernel_size=1 )
__a = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_a )
self.fpn_convs.append(_a )
__a = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __UpperCAmelCase ( self ):
self.apply(self._init_weights )
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self , _a ):
__a = inputs[-1]
__a = [x]
psp_outs.extend(self.psp_modules(_a ) )
__a = torch.cat(_a , dim=1 )
__a = self.bottleneck(_a )
return output
def __UpperCAmelCase ( self , _a ):
# build laterals
__a = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_a ) )
# build top-down path
__a = len(_a )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__a = laterals[i - 1].shape[2:]
__a = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_a , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
__a = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__a = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
__a = torch.cat(_a , dim=1 )
__a = self.fpn_bottleneck(_a )
__a = self.classifier(_a )
return output
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a = 2 , _a = 3 , _a = 1 ):
super().__init__()
__a = config
__a = config.auxiliary_in_channels
__a = config.auxiliary_channels
__a = config.auxiliary_num_convs
__a = config.auxiliary_concat_input
__a = in_index
__a = (kernel_size // 2) * dilation
__a = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_a , padding=_a , dilation=_a ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_a , padding=_a , dilation=_a ) )
if self.num_convs == 0:
__a = nn.Identity()
else:
__a = nn.Sequential(*_a )
if self.concat_input:
__a = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_a , padding=kernel_size // 2 )
__a = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def __UpperCAmelCase ( self ):
self.apply(self._init_weights )
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self , _a ):
# just take the relevant feature maps
__a = encoder_hidden_states[self.in_index]
__a = self.convs(_a )
if self.concat_input:
__a = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__a = self.classifier(_a )
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = UperNetConfig
__UpperCAmelCase : Union[str, Any] = 'pixel_values'
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __UpperCAmelCase ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __UpperCAmelCase ( self , _a , _a=False ):
if isinstance(_a , _a ):
__a = value
lowercase_ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__(_a )
__a = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__a = UperNetHead(_a , in_channels=self.backbone.channels )
__a = UperNetFCNHead(_a ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_a , config_class=_CONFIG_FOR_DOC )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , _a = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = output_attentions if output_attentions is not None else self.config.output_attentions
__a = self.backbone.forward_with_filtered_kwargs(
_a , output_hidden_states=_a , output_attentions=_a )
__a = outputs.feature_maps
__a = self.decode_head(_a )
__a = nn.functional.interpolate(_a , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_a )
__a = None
if self.auxiliary_head is not None:
__a = self.auxiliary_head(_a )
__a = nn.functional.interpolate(
_a , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_a )
__a = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
__a = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__a = loss_fct(_a , _a )
__a = loss_fct(_a , _a )
__a = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
__a = (logits,) + outputs[1:]
else:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
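# An illustrative sketch of the pyramid pooling idea used by the decode head
# above: pool the deepest feature map to several fixed grid sizes, project
# each with a 1x1 conv, upsample back to the input resolution, and
# concatenate. The channel counts (512 in, 128 per branch) are hypothetical;
# the real UperNet head takes them from its config.
import torch
from torch import nn

features = torch.randn(1, 512, 16, 16)  # (batch, channels, height, width)
branches = []
for scale in (1, 2, 3, 6):  # the default pool_scales
    pooled = nn.AdaptiveAvgPool2d(scale)(features)
    projected = nn.Conv2d(512, 128, kernel_size=1)(pooled)
    upsampled = nn.functional.interpolate(
        projected, size=features.shape[2:], mode="bilinear", align_corners=False
    )
    branches.append(upsampled)
fused = torch.cat([features] + branches, dim=1)
assert fused.shape == (1, 512 + 4 * 128, 16, 16)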
| 45
|
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : list ) -> bool:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(lowerCAmelCase__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(lowerCAmelCase__ ) == 1:
return True
__a = series[1] - series[0]
for index in range(len(lowerCAmelCase__ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def lowercase ( lowerCAmelCase__ : list ) -> float:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(lowerCAmelCase__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
__a = 0
for val in series:
answer += val
return answer / len(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 1
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=3 , _a=3_2 , _a=3 , _a=1_0 , _a=[8, 1_6, 3_2, 6_4] , _a=[1, 1, 2, 1] , _a=True , _a=True , _a="relu" , _a=3 , _a=None , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=1 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Any = num_channels
_a : Any = embeddings_size
_a : Optional[Any] = hidden_sizes
_a : Dict = depths
_a : List[Any] = is_training
_a : List[Any] = use_labels
_a : Dict = hidden_act
_a : str = num_labels
_a : Optional[Any] = scope
_a : List[Any] = len(_a )
_a : Optional[Any] = out_features
_a : Dict = out_indices
_a : Optional[Any] = num_groups
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> Tuple:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __lowercase ( self , _a , _a , _a ) -> Dict:
_a : Optional[int] = BitModel(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __lowercase ( self , _a , _a , _a ) -> List[Any]:
_a : List[str] = self.num_labels
_a : str = BitForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : str = BitBackbone(config=_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : List[Any] = None
_a : Tuple = BitBackbone(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.prepare_config_and_inputs()
_a , _a , _a : Dict = config_and_inputs
_a : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> str:
_a : Optional[int] = BitModelTester(self )
_a : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __lowercase ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self ) -> str:
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __lowercase ( self ) -> Tuple:
pass
def __lowercase ( self ) -> Tuple:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(_a )
_a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Any = [*signature.parameters.keys()]
_a : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __lowercase ( self ) -> List[Any]:
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def __lowercase ( self ) -> List[Any]:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_a : str = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_a : Tuple = layer_type
_a : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : int = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> str:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> List[str]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[str] = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
_a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __lowercase ( self ) -> str:
_a : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_a : Union[str, Any] = self.default_image_processor
_a : str = prepare_img()
_a : List[str] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Tuple = model(**_a )
# verify the logits
_a : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : Optional[int] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = BitConfig
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
_a : Optional[Any] = BitModelTester(self )
| 15
|
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
while a != 0:
snake_case_ , snake_case_ = b % a, a
return b
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
if gcd(UpperCAmelCase , UpperCAmelCase ) != 1:
snake_case_ = f'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ = 1, 0, a
snake_case_ , snake_case_ , snake_case_ = 0, 1, m
while va != 0:
snake_case_ = ua // va
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
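# Worked example of the extended-Euclid routine above: the inverse of 7 mod 26
# is 15, because 7 * 15 = 105 = 4 * 26 + 1. Since Python 3.8 the built-in
# three-argument pow computes the same modular inverse directly.
assert pow(7, -1, 26) == 15
assert (7 * 15) % 26 == 1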
| 69
| 1
|
'''simple docstring'''
import requests
_SCREAMING_SNAKE_CASE = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( __lowerCAmelCase : str ) -> List[Any]:
snake_case = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 370
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> str:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
        snake_case = config.feat_extract_norm == """layer"""
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3
| 0
|
"""simple docstring"""
def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density")
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus")
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
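# Worked example with approximate textbook values (treated as assumptions):
# water at ~20 °C has bulk modulus K ≈ 2.15e9 Pa and density ρ ≈ 998 kg/m³,
# so v = sqrt(K / ρ) ≈ 1468 m/s, close to the measured ~1480 m/s.
assert abs((2.15e9 / 998) ** 0.5 - 1468) < 1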
| 17
|
'''simple docstring'''
def a__ ( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
if n == 0:
return 0
__SCREAMING_SNAKE_CASE = float("""-inf""" )
for i in range(1 , n + 1 ):
__SCREAMING_SNAKE_CASE = max(
a__ , prices[i - 1] + naive_cut_rod_recursive(n - i , a__ ) )
return max_revue
def a__ ( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
__SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__SCREAMING_SNAKE_CASE = float("""-inf""" )
for i in range(1 , n + 1 ):
__SCREAMING_SNAKE_CASE = max(
a__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , a__ , a__ ) , )
__SCREAMING_SNAKE_CASE = max_revenue
return max_rev[n]
def a__ ( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )]
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , n + 1 ):
__SCREAMING_SNAKE_CASE = max_rev[i]
for j in range(1 , i + 1 ):
__SCREAMING_SNAKE_CASE = max(a__ , prices[j - 1] + max_rev[i - j] )
__SCREAMING_SNAKE_CASE = max_revenue_i
return max_rev[n]
def a__ ( a__ , a__ ):
"""simple docstring"""
if n < 0:
__SCREAMING_SNAKE_CASE = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(a__ )
if n > len(a__ ):
__SCREAMING_SNAKE_CASE = (
"""Each integral piece of rod must have a corresponding price. """
F'Got n = {n} but length of prices = {len(a__ )}'
)
raise ValueError(a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [6, 10, 12, 15, 20, 23]
__SCREAMING_SNAKE_CASE = len(a__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__SCREAMING_SNAKE_CASE = 36
__SCREAMING_SNAKE_CASE = top_down_cut_rod(a__ , a__ )
__SCREAMING_SNAKE_CASE = bottom_up_cut_rod(a__ , a__ )
__SCREAMING_SNAKE_CASE = naive_cut_rod_recursive(a__ , a__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
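# A worked example of the bottom-up recurrence above with descriptive names:
# max_rev[i] = max over j of (prices[j - 1] + max_rev[i - j]). For the classic
# CLRS prices below, a rod of length 4 is best cut into two pieces of length 2,
# for a revenue of 5 + 5 = 10.
demo_prices = [1, 5, 8, 9]
max_rev = [0] * (len(demo_prices) + 1)
for i in range(1, len(demo_prices) + 1):
    max_rev[i] = max(demo_prices[j - 1] + max_rev[i - j] for j in range(1, i + 1))
assert max_rev[4] == 10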
| 267
| 0
|
def A (__A : int = 1000 ) -> int:
"""simple docstring"""
UpperCAmelCase_ = 2**power
UpperCAmelCase_ = 0
while n:
UpperCAmelCase_ , UpperCAmelCase_ = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
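# Worked example of the digit-sum loop above: 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26. A string-based one-liner gives the same answer.
assert sum(int(d) for d in str(2**15)) == 26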
| 7
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
| 7
| 1
|
'''simple docstring'''
def UpperCAmelCase ( a_ = 1_0_0_0 ) -> Union[str, Any]:
"""simple docstring"""
return sum(e for e in range(3 , A_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'{solution() = }')
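# Worked example: below 10, the multiples of 3 or 5 are 3, 5, 6 and 9,
# which sum to 23. The generator expression above computes exactly this.
assert sum(e for e in range(3, 10) if e % 3 == 0 or e % 5 == 0) == 23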
| 344
|
"""simple docstring"""
__UpperCamelCase : Optional[Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
__UpperCamelCase : Tuple = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
__UpperCamelCase : Optional[int] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
__UpperCamelCase : Tuple = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
__UpperCamelCase : str = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
__UpperCamelCase : Dict = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
__UpperCamelCase : int = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
__UpperCamelCase : Optional[int] = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 106
| 0
|
'''simple docstring'''
def UpperCAmelCase ( a_ , a_ ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
A_ : Optional[Any] = str(bin(a_ ) )
binary_number += "0" * shift_amount
return binary_number
def UpperCAmelCase ( a_ , a_ ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError("""both inputs must be positive integers""" )
A_ : Optional[Any] = str(bin(a_ ) )[2:]
if shift_amount >= len(a_ ):
return "0b0"
A_ : Tuple = binary_number[: len(a_ ) - shift_amount]
return "0b" + shifted_binary_number
def UpperCAmelCase ( a_ , a_ ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
A_ : int = """0""" + str(bin(a_ ) ).strip("""-""" )[2:]
else: # Get binary (2's complement) representation of negative number
A_ : Dict = len(bin(a_ )[3:] ) # Find 2's complement of number
A_ : Tuple = bin(abs(a_ ) - (1 << binary_number_length) )[3:]
A_ : Dict = (
"""1""" + """0""" * (binary_number_length - len(a_ )) + binary_number
)
if shift_amount >= len(a_ ):
return "0b" + binary_number[0] * len(a_ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(a_ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
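# Worked examples of the string-based shifts above, checked against Python's
# native operators: a left shift by k multiplies by 2**k, a right shift of a
# non-negative number drops low bits, and an arithmetic right shift preserves
# the sign bit (e.g. -8 >> 1 == -4, which the routine above renders as
# "0b11100", i.e. -4 in five-bit two's complement).
assert 8 << 2 == 32   # logical left shift
assert 8 >> 2 == 2    # right shift of a non-negative number
assert -8 >> 1 == -4  # arithmetic right shift keeps the sign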
| 356
|
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` to the [0, 1] range via min-max normalization."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` to zero mean and unit sample standard deviation (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
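
# Illustrative check of the two rescalers above (values computed by hand; this
# usage block is an addition, not part of the original module):
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0]
    print(normalization(data))  # [0.0, 0.5, 1.0] -- min-max maps the range onto [0, 1]
    print(standardization(data))  # [-1.0, 0.0, 1.0] -- zero mean, unit sample std (stdev == 2.0)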
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Exercise rabin_karp on a handful of positive and negative cases."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
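
# Worked example of the O(1) rolling-hash update used above; an illustrative
# addition (not part of the original module), with the expected identity
# verified by hand for a length-2 window:
def _rolling_hash_demo() -> None:
    base, mod = alphabet_size, modulus
    h_ab = (ord("a") * base + ord("b")) % mod
    # slide the window "ab" -> "bc": drop 'a' (weighted by base), append 'c'
    h_bc = ((h_ab - ord("a") * base) * base + ord("c")) % mod
    assert h_bc == (ord("b") * base + ord("c")) % mod


if __name__ == "__main__":
    _rolling_hash_demo()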
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = '''src/transformers'''


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    """Build a table of all model types with the frameworks (PT/TF/Flax) that support them."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline tag, auto class) pairs."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F'TF_{model_mapping}', F'FLAX_{model_mapping}']
        auto_classes = [auto_class, F'TF_{auto_class}', F'Flax_{auto_class}']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Regenerate the metadata files and push them to the `transformers-metadata` dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))

        if commit_sha is not None:
            commit_message = (
                F'Update with commit {commit_sha}\n\nSee: '
                F'https://github.com/huggingface/transformers/commit/{commit_sha}'
            )
        else:
            commit_message = 'Update'

        upload_folder(
            repo_id='huggingface/transformers-metadata',
            folder_path=tmp_dir,
            repo_type='dataset',
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that all pipeline tasks appear in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F'`utils/update_metadata.py`: {msg}. Please add them!'
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
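
# Typical invocations, per the header comment above (run from the repo root;
# the token/sha values are placeholders):
#
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only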
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    '''configuration_groupvit''': [
        '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''GroupViTConfig''',
        '''GroupViTOnnxConfig''',
        '''GroupViTTextConfig''',
        '''GroupViTVisionConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_groupvit'''] = [
        '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GroupViTModel''',
        '''GroupViTPreTrainedModel''',
        '''GroupViTTextModel''',
        '''GroupViTVisionModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
        '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFGroupViTModel''',
        '''TFGroupViTPreTrainedModel''',
        '''TFGroupViTTextModel''',
        '''TFGroupViTVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
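
# Design note on the `_LazyModule` pattern above: the heavy torch/TF imports
# are deferred until one of the names registered in `_import_structure` is
# first accessed. A minimal illustration (assumes an installed `transformers`;
# this sketch is an addition, not part of the original file):
#
#     from transformers.models.groupvit import GroupViTConfig  # cheap: config module only
#     from transformers.models.groupvit import GroupViTModel   # first touch pulls in torch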
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = '''bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ]
        )
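
# A minimal usage sketch (standard `transformers` config API; the overrides
# below are illustrative, not part of the original module):
if __name__ == "__main__":
    config = BertConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    print(config.model_type)  # "bert"
    print(config.hidden_size)  # 384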
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
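
# A minimal sampling sketch (assumes the `diffusers` package and the public
# "google/ddpm-cifar10-32" checkpoint -- both assumptions of this demo, not
# part of the original file):
if __name__ == "__main__":
    pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
    result = pipe(batch_size=1, num_inference_steps=50, eta=0.0)
    result.images[0].save("ddim_sample.png")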
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
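
# Quick hand check of the digit-sum loop (added illustration):
# 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
assert solution(15) == 26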
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Extract the hidden states of the base transformer for use as downstream features."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
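
# How this class is typically reached -- a sketch using the standard
# `transformers.pipeline` factory (the checkpoint name is illustrative and an
# assumption of this demo):
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Hello world")  # nested list shaped [batch][tokens][hidden_size]
    print(len(features[0]), len(features[0][0]))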
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =ViTModelTester(self)
__lowercase =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=3_7)
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def __lowerCamelCase ( self : str):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(__lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear))
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(__lowercase)
__lowercase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase =[*signature.parameters.keys()]
__lowercase =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase)
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowercase)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase)
@slow
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase =ViTModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
def _A ( ):
"""simple docstring"""
__lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 350
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
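# Illustrative usage sketch (not part of the original module): the backbone
# mixin exposes which stages feed downstream heads, with stage names built in
# `__init__` from `depths`.
#
#   config = ResNetConfig(depths=[2, 2, 4, 2], out_features=["stage3"])
#   config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#   config.out_features  # ['stage3']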
| 48
| 0
|
from math import pow, sqrt
def validate(*values: float) -> bool:
    # All inputs must be strictly positive for the formulas below to apply.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )
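# Illustrative check (not part of the original module): by Graham's law,
# hydrogen (M ~ 2.016) effuses about 4x faster than oxygen (M ~ 31.998).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.998))  # ~3.984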
| 343
|
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Both the rows and the columns must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
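    # Illustrative cross-check (not in the original): all three counting
    # strategies must agree on the small test grids defined above.
    for small_grid in test_grids[:-1]:
        assert (
            count_negatives_binary_search(small_grid)
            == count_negatives_brute_force(small_grid)
            == count_negatives_brute_force_with_break(small_grid)
        )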
| 164
| 0
|
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Generates primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F"""{solution() = }""")
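    # Small illustrative check (not in the original): primes below 10 sum to 17.
    assert solution(10) == 17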
| 358
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self ) -> Optional[Any]:
return self.pa_type
    def encode_example(self, value) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image) -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image) -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
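# Illustrative round trip (not part of the original module), assuming Pillow is
# installed and an "example.png" exists locally:
#
#   feature = Image()
#   encoded = feature.encode_example(PIL.Image.open("example.png"))  # {"path": ..., "bytes": ...}
#   decoded = feature.decode_example(encoded)                        # PIL.Image.Image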
| 235
| 0
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL (requires a redirect URL first)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method"""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    """count each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
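# Tiny illustrative run (not part of the original script); each log entry has
# the shape [error_line, error, failed_test, job_link] produced above:
#
#   logs = [["tests/models/bert/test_modeling_bert.py:42", "AssertionError",
#            "tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward", None]]
#   print(make_github_table(reduce_by_error(logs)))
#   print(make_github_table_per_model(reduce_by_model(logs)))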
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 304
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pads (and truncates) ragged `sequences` to `sequence_length`; the exact
    left-hand slice targets below are a best-effort reconstruction."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
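# Illustrative example (not in the original): right-padding two ragged
# sequences to length 4 with -1 as the scalar pad value.
#
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]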
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 304
| 1
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
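# Hedged usage sketch (not in the original file): running this task through the
# generic `pipeline` factory. The checkpoint name is illustrative; any
# CLAP-style model with audio and text towers applies.
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])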
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self):
'''simple docstring'''
return self.config.num_train_timesteps
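# Hedged sketch (not in the original file) of the predictor-corrector loop this
# scheduler implements; `model` stands for any score network with the usual
# (sample, t) -> score interface.
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           score = model(sample, t)
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = model(sample, t)
#       sample = scheduler.step_pred(score, t, sample).prev_sample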
| 300
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
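# Illustrative example (not in the original): with an offset of 1, a checkpoint
# key from stage block 2 is remapped to HF block 1.
#
#   replace_key_with_offset("2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "block.1.3.output.conv1.weight"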
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """We will verify our results on a COCO image"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 54
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
    parser.add_argument('''--vocab_size''', default=10_224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
A_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
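
# Hedged usage sketch (all paths and the script filename below are placeholders, not real checkpoints):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2 \
#       --vocab_size 10224 --num_decoder_layers 7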
| 64
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by `set_defaults(func=...)` to instantiate the command from parsed args."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's arguments on the root `transformers-cli` parser."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE__ : str =self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : str =''''''
else:
SCREAMING_SNAKE_CASE__ : Any =self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : List[Any] =''''''
convert_transfo_xl_checkpoint_to_pytorch(
__lowercase , self._config , self._pytorch_dump_output , __lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl,"
                " xlnet, xlm, lxmert, rembert]"
            )
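
# Hedged usage sketch (checkpoint and config paths are placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin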
| 369
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary-search the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the LIS length in O(n log n) using the classic 'tails' method."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element to keep tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
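
    # Hedged sanity checks (expected values follow directly from the LIS definition):
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
    assert longest_increasing_subsequence_length([10, 9, 8]) == 1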
| 222
| 0
|
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums.

    Index 0 is stored separately in tree[0]; indices >= 1 follow the usual
    lowest-set-bit traversal.
    """

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr):
        """Build the tree from an array in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Recover the original array from the tree in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        """arr[index] += value in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """arr[index] = value in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Sum of arr[0:right] in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Sum of arr[left:right] in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """arr[index] in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Largest index i with prefix(i + 1) <= value, via binary lifting; -1 if none."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
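
    # Hedged usage sketch for the FenwickTree above (values chosen for illustration):
    f = FenwickTree([1, 2, 3, 4, 5])
    assert f.prefix(3) == 1 + 2 + 3  # sum of arr[0:3]
    f.add(0, 10)  # arr[0] += 10
    assert f.query(0, 2) == 11 + 2  # sum of arr[0:2]
    assert f.get_array() == [11, 2, 3, 4, 5]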
| 42
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index where `pattern` occurs in `s`, in O(len(s) * len(pattern)) time."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
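    # Hedged extra check (expected indices follow from 0-based matching):
    assert naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC') == [4, 10, 18]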
| 48
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic cluster config for a local machine with potentially multiple devices."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
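
# Hedged usage sketch (the import path is what recent accelerate versions expose;
# treat it as an assumption if your version differs):
#
#   from accelerate.utils import write_basic_config
#   write_basic_config(mixed_precision="fp16", save_location="./default_config.json")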
| 283
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT checkpoint's weights into our BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__A =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
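
# Hedged usage sketch (the script filename and output folder are placeholders;
# the checkpoint URL is the default declared above):
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base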
| 283
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, wrapped with shell escapes at `max_width` columns."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Flip `if 0` to `if 1` to debug the report generation quickly with fake metrics,
    # without performing any real runs.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len,
    target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len,
                args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 68
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
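
# Hedged usage sketch (the model id is illustrative; any AltCLIP-style checkpoint
# exposing this processor should work):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")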
| 235
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
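
# Hedged usage sketch: instantiating with defaults gives the block-sparse setup above.
#
#   cfg = BigBirdConfig()
#   assert cfg.attention_type == "block_sparse" and cfg.block_size == 64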
| 369
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 273
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
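
# Hedged usage sketch (the script filename, data dir, and model id are placeholders;
# --model_name_or_path and --data_dir come from the shared lightning_base args):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results --do_train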
| 300
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Text-to-image pipeline that reuses a reference latent so one seed gives similar images across sizes.

    Class name reconstructed from the diffusers community `seed_resize_stable_diffusion` pipeline.
    """

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
                 unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
                 safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()
        self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
                              scheduler=scheduler, safety_checker=safety_checker,
                              feature_extractor=feature_extractor)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512,
                 num_inference_steps: int = 50, guidance_scale: float = 7.5,
                 negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1,
                 eta: float = 0.0, generator: Optional[torch.Generator] = None,
                 latents: Optional[torch.FloatTensor] = None,
                 latents_reference: Optional[torch.FloatTensor] = None,  # reconstructed: needed when `latents` is given
                 output_type: Optional[str] = "pil", return_dict: bool = True,
                 callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1,
                 text_embeddings: Optional[torch.FloatTensor] = None, **kwargs):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt )} !="
                    f" {type(prompt )}." )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="max_length" , max_length=max_length , truncation=True , return_tensors="pt" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
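        # The reference shape always corresponds to the 512x512 base resolution
        # (512 / 8 = 64 latent cells per side), independent of the requested
        # height/width.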
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device="cpu" , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device="cpu" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}" )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
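        # Worked example of the centered paste above: for a 512x768 target and
        # the 512x512 reference, the latent grids are 64x96 and 64x64, so
        # dx = (96 - 64) // 2 = 16, w = h = 64, and the full reference grid is
        # pasted centered at latents[:, :, 0:64, 16:80].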
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
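        # `eta` is forwarded only when the scheduler's `step()` accepts it, so the
        # same call site works for DDIM (which uses eta) and for PNDM/LMS (which
        # do not). A minimal standalone sketch of the same pattern (names here
        # are illustrative, not from this file):
        #
        #   import inspect
        #   def filtered_kwargs(fn, **kwargs):
        #       accepted = set(inspect.signature(fn).parameters)
        #       return {k: v for k, v in kwargs.items() if k in accepted}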
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
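                # i.e. noise = eps_uncond + w * (eps_text - eps_uncond): with the
                # default w = 7.5 the text direction is amplified 7.5x relative to
                # the unconditional prediction, and w = 1.0 reduces exactly to
                # eps_text (no classifier-free guidance).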
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="pt" ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
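# A minimal usage sketch for the pipeline above (the checkpoint id, sizes, and
# the way the reference latents are supplied are illustrative assumptions, not
# definitions from this file):
#
#   pipe = __magic_name__.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
#   generator = torch.Generator("cuda").manual_seed(0)
#   out_512 = pipe("a red fox", height=512, width=512, generator=generator)
#   out_768 = pipe("a red fox", height=512, width=768, generator=generator)
#   # same seed, wider canvas: the composition stays anchored to the 512x512 run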
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason="MRA does not output attentions" )
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
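# The @slow integration tests above are skipped by default; with the usual
# transformers test setup they can be run locally via (path is illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/mra/test_modeling_mra.py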
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments( path , n_shave_prefix_segments=1 ):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split("." )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0" , "norm1" )
        new_item = new_item.replace("in_layers.2" , "conv1" )
        new_item = new_item.replace("out_layers.0" , "norm2" )
        new_item = new_item.replace("out_layers.3" , "conv2" )
        new_item = new_item.replace("emb_layers.1" , "time_emb_proj" )
        new_item = new_item.replace("skip_connection" , "conv_shortcut" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
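# Example of one entry produced above (illustrative key, n_shave_prefix_segments=0):
#   {"old": "input_blocks.1.0.in_layers.0.weight",
#    "new": "input_blocks.1.0.norm1.weight"}
# A positive n_shave_prefix_segments would additionally drop that many leading
# dotted segments from the new key.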
def renew_attention_paths( old_list , n_shave_prefix_segments=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight" , "group_norm.weight" )
        new_item = new_item.replace("norm.bias" , "group_norm.bias" )
        new_item = new_item.replace("proj_out.weight" , "proj_attn.weight" )
        new_item = new_item.replace("proj_out.bias" , "proj_attn.bias" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    """simple docstring"""
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["query"]] = query.reshape(target_shape )
            checkpoint[path_map["key"]] = key.reshape(target_shape )
            checkpoint[path_map["value"]] = value.reshape(target_shape )
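    # Shape walk-through for the split above (illustrative numbers): a fused qkv
    # conv weight of shape (1536, 512, 1) with num_head_channels=64 gives
    # channels=512 and num_heads=8; the reshape yields (8, 192, 512, 1), the
    # split three (8, 64, 512, 1) chunks, and each reshape(target_shape)
    # produces a (512, 512) per-projection weight.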
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
        new_path = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
        new_path = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"] , replacement["new"] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint( checkpoint , config ):
"""simple docstring"""
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowercase__ : Dict = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowercase__ : str = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCamelCase__ )
}
# Retrieves the keys for the middle blocks only
lowercase__ : Tuple = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowercase__ : Union[str, Any] = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCamelCase__ )
}
# Retrieves the keys for the output blocks only
lowercase__ : Tuple = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowercase__ : Tuple = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCamelCase__ )
}
for i in range(1 , lowerCamelCase__ ):
lowercase__ : Tuple = (i - 1) // (config["num_res_blocks"] + 1)
lowercase__ : Optional[int] = (i - 1) % (config["num_res_blocks"] + 1)
lowercase__ : List[Any] = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
lowercase__ : Dict = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
lowercase__ : int = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
lowercase__ : List[str] = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
lowercase__ : Union[str, Any] = renew_resnet_paths(lowerCamelCase__ )
lowercase__ : Optional[int] = {"old": F"""input_blocks.{i}.0""", "new": F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
lowercase__ : Optional[int] = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path, resnet_op] , config=lowerCamelCase__ )
if len(lowerCamelCase__ ):
lowercase__ : Tuple = renew_attention_paths(lowerCamelCase__ )
lowercase__ : str = {
"old": F"""input_blocks.{i}.1""",
"new": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowercase__ : List[str] = {
F"""input_blocks.{i}.1.qkv.bias""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
"key": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCamelCase__ , config=lowerCamelCase__ , )
lowercase__ : int = middle_blocks[0]
lowercase__ : Dict = middle_blocks[1]
lowercase__ : Dict = middle_blocks[2]
lowercase__ : Any = renew_resnet_paths(lowerCamelCase__ )
assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , config=lowerCamelCase__ )
lowercase__ : List[Any] = renew_resnet_paths(lowerCamelCase__ )
assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , config=lowerCamelCase__ )
lowercase__ : Optional[int] = renew_attention_paths(lowerCamelCase__ )
lowercase__ : Optional[int] = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , attention_paths_to_split=lowerCamelCase__ , config=lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
lowercase__ : List[Any] = i // (config["num_res_blocks"] + 1)
lowercase__ : Optional[int] = i % (config["num_res_blocks"] + 1)
lowercase__ : List[Any] = [shave_segments(lowerCamelCase__ , 2 ) for name in output_blocks[i]]
lowercase__ : Optional[Any] = {}
for layer in output_block_layers:
lowercase__ , lowercase__ : str = layer.split("." )[0], shave_segments(lowerCamelCase__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCamelCase__ )
else:
lowercase__ : Tuple = [layer_name]
if len(lowerCamelCase__ ) > 1:
lowercase__ : Dict = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
lowercase__ : Dict = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
lowercase__ : Optional[Any] = renew_resnet_paths(lowerCamelCase__ )
lowercase__ : Optional[Any] = renew_resnet_paths(lowerCamelCase__ )
lowercase__ : Tuple = {"old": F"""output_blocks.{i}.0""", "new": F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowercase__ : List[str] = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
lowercase__ : Tuple = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
lowercase__ : Optional[Any] = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCamelCase__ ) == 2:
lowercase__ : int = []
if len(lowerCamelCase__ ):
lowercase__ : Tuple = renew_attention_paths(lowerCamelCase__ )
lowercase__ : str = {
"old": F"""output_blocks.{i}.1""",
"new": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowercase__ : Union[str, Any] = {
F"""output_blocks.{i}.1.qkv.bias""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
"key": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=lowerCamelCase__ , )
else:
lowercase__ : int = renew_resnet_paths(lowerCamelCase__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowercase__ : List[Any] = ".".join(["output_blocks", str(lowerCamelCase__ ), path["old"]] )
lowercase__ : Any = ".".join(["up_blocks", str(lowerCamelCase__ ), "resnets", str(lowerCamelCase__ ), path["new"]] )
lowercase__ : List[Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCAmelCase__ = json.loads(f.read())
lowerCAmelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCAmelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCAmelCase__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCAmelCase__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
lowerCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
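# Example invocation (script name and paths are illustrative):
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted
# When a scheduler and VQModel can be loaded from the checkpoint directory, a
# complete LDMPipeline is saved; otherwise only the converted UNet is written.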
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
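# With the _LazyModule above, e.g. `from transformers import InstructBlipProcessor`
# resolves the torch-dependent submodules only on first attribute access, so the
# top-level import stays cheap even when torch is unavailable.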
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowercase ( unittest.TestCase ):
    def check_results_dict_not_empty( self , results ):
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self ):
        """simple docstring"""
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , torchscript=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_inference_fp16( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , fp16=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
    def test_train_no_configs_fp16( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , fp16=True , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(tmp_dir , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(tmp_dir , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(tmp_dir , 'train_time.csv' ) , env_info_csv_file=os.path.join(tmp_dir , 'env.csv' ) , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'train_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'train_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'env.csv' ) ).exists() )
    def test_trace_memory( self ):
        """simple docstring"""
        MODEL_ID = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty( summary ):
            self.assertTrue(hasattr(summary , 'sequential' ) )
            self.assertTrue(hasattr(summary , 'cumulative' ) )
            self.assertTrue(hasattr(summary , 'current' ) )
            self.assertTrue(hasattr(summary , 'total' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , 'log.txt' ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , 'log.txt' ) ).exists() )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Tuple = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = 'mobilenet_v1'
    def __init__( self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
    @property
    def outputs( self ):
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-4
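# A minimal construction sketch (values are illustrative):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   onnx_config.inputs   # OrderedDict([('pixel_values', {0: 'batch'})])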
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict ):
"""simple docstring"""
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None ):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
        if "gate" in key:
            key = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
        if ".encoder_attn." in key:
            key = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
        if "final_layer_norm" in key:
            key = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
        new_dict[key] = state_dict[old_key]
return new_dict
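# Example renames performed above (illustrative keys, expert_idx=7):
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#     -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
#   "decoder.layers.3.moe_layer.gate.wg.weight"
#     -> "decoder.layers.3.ffn.router.classifier.weight"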
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['''model''']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['''shared.weight'''] = shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , '''w''' , encoding='''utf-8''' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
        f.write(content )
    return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
    metadata , index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_snake_case = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(f"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location="cpu" )
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict( key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
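# Layout notes for the renames above: PyTorch conv weights are (out, in, kh, kw)
# while Flax expects (kh, kw, in, out), hence the (2, 3, 1, 0) transpose; linear
# kernels are simply transposed because torch stores (out, in) and Flax (in, out).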
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ):
'''simple docstring'''
lowerCamelCase : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase : Dict = flax_model.params["params"]
else:
lowerCamelCase : Any = flax_model.params
lowerCamelCase : Tuple = flatten_dict(SCREAMING_SNAKE_CASE_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase : Union[str, Any] = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : str = {}
lowerCamelCase : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase : str = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCamelCase : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase , lowerCamelCase : Dict = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# add model prefix if necessary
lowerCamelCase : List[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase : List[str] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase : Union[str, Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase : int = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
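

# Hedged usage sketch (model names illustrative): converting an eager PyTorch state dict
# into Flax parameters for the matching architecture.
#
#     pt_model = BertModel.from_pretrained("bert-base-uncased")
#     flax_model = FlaxBertModel(pt_model.config)  # randomly initialised target
#     flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)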
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint into the matching flattened Flax parameter dict."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load one shard at a time to bound memory usage
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys/values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )
    return pt_model
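

# Hedged usage sketch (model name illustrative): round-tripping Flax weights into the
# matching PyTorch architecture with the loader above.
#
#     flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
#     pt_model = BertModel(flax_model.config)
#     pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)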
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
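

# An equivalent constant-memory sketch (hypothetical helper, not part of the original
# solution): walk the Fibonacci sequence pairwise instead of storing the whole list.
def solution_constant_memory(n: int = 4000000) -> int:
    a, b = 0, 1
    total = 0
    while a <= n:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total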
if __name__ == "__main__":
print(f'''{solution() = }''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
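
# Design note: _LazyModule defers the heavy framework imports declared above until an
# attribute is first accessed, so importing this package stays cheap even when torch or
# sentencepiece are installed. Illustrative access pattern (resolved lazily on lookup):
#
#     from transformers.models.speecht5 import SpeechT5Config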
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
lowercase__ : int = [0 for i in range(r + 1 )]
# nc0 = 1
lowercase__ : Union[str, Any] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowercase__ : Union[str, Any] = min(UpperCamelCase__ , UpperCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
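
# Quick sanity check (illustrative): the row-update scheme matches math.comb.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252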
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase__ = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each candidate label into an
    NLI premise/hypothesis pair."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline using a model fine-tuned on NLI tasks."""

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
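

# Hedged usage sketch (checkpoint name illustrative; requires a model fine-tuned on NLI):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier("I love hiking", candidate_labels=["outdoors", "cooking"])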
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator d <= digit for which numerator/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
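    # Worked example (a sketch): for 1/7 the remainders cycle through 1, 3, 2, 6, 4, 5
    # before repeating, giving cycle length 6 -- the longest for denominators up to 10.
    assert solution(1, 10) == 7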
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page-replacement cache that discards the least recently used key first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty store with bounded capacity (0 means effectively unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x, evicting the least recently used key if the cache is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer based on SentencePiece."""

    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
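

# Hedged usage sketch (model id illustrative; requires the sentencepiece package):
#
#     tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#     input_ids = tokenizer("Hello world")["input_ids"]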
"""simple docstring"""
__UpperCamelCase : Dict = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__UpperCamelCase : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
order.append(_UpperCAmelCase )
return order
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] , _UpperCAmelCase : int , _UpperCAmelCase : list[bool] ):
lowerCAmelCase = True
lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return component
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict[int, list[int]] ):
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
lowerCAmelCase = {vert: [] for vert in range(len(_UpperCAmelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCAmelCase )
lowerCAmelCase = []
for i, was_visited in enumerate(_UpperCAmelCase ):
if not was_visited:
order += topology_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = []
lowerCAmelCase = len(_UpperCAmelCase ) * [False]
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = order[len(_UpperCAmelCase ) - i - 1]
if not visited[vert]:
lowerCAmelCase = find_components(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
components_list.append(_UpperCAmelCase )
return components_list
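

if __name__ == "__main__":
    # Example run (a sketch): for test_graph_1 the components come out as
    # [[0, 1, 2], [3], [4]], following the reverse finishing order of the first DFS.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))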
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R): pass exactly one quantity as 0 and get it back solved."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
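    # Example (illustrative): with V = 10 and I = 5 the unknown resistance is V / I.
    print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}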
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = 4
__a = 3
__a = (32, 32)
__a = floats_tensor((batch_size, num_channels) + sizes ).to(_snake_case )
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
__a = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.model_class(**_snake_case )
model.to(_snake_case )
assert not model.is_gradient_checkpointing and model.training
__a = model(**_snake_case ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__a = torch.randn_like(_snake_case )
__a = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__a = self.model_class(**_snake_case )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_snake_case )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__a = model_a(**_snake_case ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__a = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__a = dict(model.named_parameters() )
__a = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_snake_case )
__a = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__a = model.to(_snake_case )
model.eval()
if torch_device == "mps":
__a = torch.manual_seed(0 )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(0 )
__a = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = image.to(_snake_case )
with torch.no_grad():
__a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample
__a = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__a = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
__a = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__a = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , fpaa=_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
with torch.no_grad():
__a = model(_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model.encode(_snake_case ).latent_dist
__a = dist.sample(generator=_snake_case )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__a = sample[0, -1, -3:, -3:].flatten().cpu()
__a = torch.tensor(_snake_case )
__a = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(_snake_case , _snake_case , atol=_snake_case )
| 6
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
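# Usage sketch: with the `_import_structure` mapping above, heavy backends are only
# imported on first attribute access, e.g. (assuming torch is installed):
#
#     from transformers import BertConfig   # cheap, config only
#     from transformers import BertModel    # triggers the lazy torch-backed import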
| 368
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
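# Worked example: a 4-D PyTorch conv weight keyed ("conv_in", "weight") with shape
# (out_ch, in_ch, h, w) is renamed to ("conv_in", "kernel") and transposed to
# (h, w, in_ch, out_ch), the HWIO layout that Flax convolutions expect.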
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
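# Usage sketch (hypothetical names: `pt_model` is a PyTorch module and `flax_model`
# a matching Flax model exposing `init_weights`; neither is defined in this file):
#
#     flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#     flax_model.params = flax_params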
| 224
| 0
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    backend = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the simulator
    job = qiskit.execute(qc_ha, backend, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'''Half Adder Output Qubit Counts: {counts}''')
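# For bit0 = bit1 = 1 the circuit is deterministic: the XOR (sum) bit is 0 and the
# AND (carry) bit is 1, so all 1000 shots land on the single key '10'
# (Qiskit prints classical bit 1 first), i.e. counts == {'10': 1000}.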
| 315
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 315
| 1
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
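# Sanity check (Project Euler 6's worked example): for n = 10 the sum of squares is 385
# and the square of the sum is 3025, so solution(10) == 2640.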
if __name__ == "__main__":
print(f'{solution() = }')
| 272
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 272
| 1
|
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
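# Sanity check (Project Euler 20's worked example): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.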
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 83
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
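# Usage sketch (network access assumed; checkpoint name taken from the maps above):
#
#     tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     ids = tok.build_inputs_with_special_tokens([5, 6])   # [cls_id, 5, 6, sep_id]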
| 83
| 1
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 364
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('''The nodes number should be same as the number of coins''')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
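# Worked example: a root holding 3 coins with two empty leaves needs two moves
# (one coin pushed to each leaf): distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2.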
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self, image: np.ndarray, size: Dict[str, int], crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None,
        resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None,
        do_normalize: bool = None, image_mean=None, image_std=None, return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
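# Resize semantics recap: for shortest_edge < 384 the image is first resized so its short
# side equals shortest_edge / crop_pct and then center-cropped to shortest_edge, mirroring
# eval-time cropping; at 384 and above the image is warped straight to a square.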
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 309
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-5,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1_024, coordinate_size=128,
        shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224,
        num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.12')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels'}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, 'apply_ocr', False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # Generate dummy pixel values
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        return dict(processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))
| 359
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""camembert-base""": 512,
}
lowercase_ = """▁"""
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 269
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
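# Usage sketch (an assumption: the agents runtime chains setup -> encode -> forward -> decode
# via PipelineTool.__call__; behavior may differ across transformers versions):
#
#     tool = TextToSpeechTool()
#     audio = tool("Hello world")   # 1-D waveform tensor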
| 35
|
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(',')
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(':')
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
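# Usage sketch (hypothetical optimizer and step counts, purely illustrative):
#
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000)
#     ...
#     optimizer.step(); lr_scheduler.step()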
| 224
| 0
|
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
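# Sanity check: bisection(f, 1, 1000) converges to ~2.0945515, the real root of x**3 - 2*x - 5.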
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 360
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
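# Sanity check (Project Euler 72's worked example): for limit = 8 there are
# 21 reduced proper fractions, and indeed solution(8) == 21.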
if __name__ == "__main__":
print(solution())
| 188
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 272
| 1
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained('microsoft/mpnet-base')

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Dict =(
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
a : Tuple =(
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained('microsoft/mpnet-base')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 370
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 292
| 0
|
import functools
def lowerCamelCase__ ( worda: str, wordb: str ):
    '''
    Compute the Levenshtein edit distance between two words with a memoised
    top-down recursion.

    >>> lowerCamelCase__("intention", "execution")
    5
    '''
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
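    # Minimal usage sketch (added, not part of the original snippet): three
    # single-character edits turn "kitten" into "sitting".
    print(lowerCamelCase__("kitten", "sitting"))  # expected output: 3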
| 12
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93
| 0
|
"""simple docstring"""
import numpy as np
import datasets
__A : Optional[int] = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__A : Any = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__A : List[str] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence") , id="X"),
}) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 363
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
__A : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__A : str = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """simple docstring"""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__A : int = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 57
| 0
|
from manim import *
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = Rectangle(height=0.5 , width=0.5 )
__lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('CPU' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
__lowerCamelCase = [mem.copy() for i in range(4 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('GPU' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('Model' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
__lowerCamelCase = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__lowerCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
__lowerCamelCase = [mem.copy() for i in range(6 )]
__lowerCamelCase = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__lowerCamelCase = Text('Loaded Checkpoint' , font_size=24 )
__lowerCamelCase = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__lowerCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCamelCase = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__lowerCamelCase = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
__lowerCamelCase = []
__lowerCamelCase = []
for i, rect in enumerate(lowerCamelCase__ ):
__lowerCamelCase = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
__lowerCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 90
|
"""simple docstring"""
from __future__ import annotations
def merge( input_list: list, low: int, mid: int, high: int ) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort( input_list: list ) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
__snake_case : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
__snake_case : Optional[int] = []
else:
__snake_case : int = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
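    # Hedged sanity check (added, not in the original): the iterative two-way
    # merge sort should agree with Python's built-in sorted().
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == sorted([5, 9, 8, 7, 1, 2, 7])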
| 269
| 0
|
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    '''
    Returns True if the string is a valid IPv4 address: four dot-separated
    numeric octets, each between 0 and 255.

    >>> is_ip_va_address_valid("192.168.0.23")
    True
    >>> is_ip_va_address_valid("192.256.15.8")
    False
    '''
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 151
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase : Any = logging.get_logger(__name__)
lowercase : Any = {'vocab_file': 'spiece.model'}
lowercase : int = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :int , a :List[Any] , a :Optional[Any]=False , a :List[str]=True , a :str=False , a :Optional[Any]="<s>" , a :Tuple="</s>" , a :int="<unk>" , a :Optional[Any]="<sep>" , a :List[str]="<pad>" , a :Any="<cls>" , a :List[Any]="<mask>" , a :Optional[Any]=["<eop>", "<eod>"] , a :Optional[Dict[str, Any]] = None , **a :List[str] , ) -> None:
__UpperCamelCase : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
__UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
__UpperCamelCase : int = 3
__UpperCamelCase : Union[str, Any] = do_lower_case
__UpperCamelCase : str = remove_space
__UpperCamelCase : int = keep_accents
__UpperCamelCase : Optional[int] = vocab_file
__UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
__UpperCamelCase : Optional[Any] = jieba
__UpperCamelCase : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCamelCase ( self :Optional[int] ) -> List[str]:
return len(self.sp_model )
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Optional[int] ) -> int:
__UpperCamelCase : Tuple = self.__dict__.copy()
__UpperCamelCase : Optional[Any] = None
return state
def __setstate__( self :Optional[int] , a :Dict ) -> str:
__UpperCamelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Union[str, Any] = {}
__UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self :List[Any] , a :str ) -> int:
if self.remove_space:
__UpperCamelCase : int = " ".join(inputs.strip().split() )
else:
__UpperCamelCase : Union[str, Any] = inputs
__UpperCamelCase : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__UpperCamelCase : Tuple = unicodedata.normalize("NFKD" , a )
__UpperCamelCase : Optional[Any] = "".join([c for c in outputs if not unicodedata.combining(a )] )
if self.do_lower_case:
__UpperCamelCase : Any = outputs.lower()
return outputs
def _lowerCamelCase ( self :Tuple , a :str ) -> List[str]:
__UpperCamelCase : List[Any] = self.preprocess_text(a )
__UpperCamelCase : int = self.sp_model.encode(a , out_type=a )
__UpperCamelCase : Optional[Any] = []
for piece in pieces:
if len(a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__UpperCamelCase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase : List[str] = cur_pieces[1:]
else:
__UpperCamelCase : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a )
else:
new_pieces.append(a )
return new_pieces
def _lowerCamelCase ( self :str , a :Dict ) -> List[str]:
return self.sp_model.PieceToId(a )
def _lowerCamelCase ( self :Tuple , a :int ) -> Tuple:
return self.sp_model.IdToPiece(a )
def _lowerCamelCase ( self :Union[str, Any] , a :Union[str, Any] ) -> List[Any]:
__UpperCamelCase : str = "".join(a ).replace(a , " " ).strip()
return out_string
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Tuple = [self.sep_token_id]
__UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is not None:
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1, 1]
return ([0] * len(a )) + [1, 1]
def _lowerCamelCase ( self :Dict , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Optional[int] = [self.sep_token_id]
__UpperCamelCase : Dict = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self :Union[str, Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Tuple = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
__UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def _lowerCamelCase ( self :str , *a :str , **a :Any ) -> Tuple:
__UpperCamelCase : int = super()._decode(*a , **a )
__UpperCamelCase : int = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 151
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix( matrix: list[list[float]] ) -> list[list[float]]:
    """simple docstring"""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 203
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
        # No specific FOR_XXX available yet
    def __call__(self, audios, **kwargs):
        """simple docstring"""
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """simple docstring"""
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 188
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Tuple = '''wav2vec2'''
def __init__( self, A=32, A=768, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=0.1, A=0.0, A=0.0, A=0.1, A=0.1, A=0.02, A=1E-5, A="group", A="gelu", A=(512, 512, 512, 512, 512, 512, 512), A=(5, 2, 2, 2, 2, 2, 2), A=(10, 3, 3, 3, 3, 2, 2), A=False, A=128, A=16, A=False, A=True, A=0.05, A=10, A=2, A=0.0, A=10, A=0, A=320, A=2, A=0.1, A=100, A=256, A=256, A=0.1, A="sum", A=False, A=False, A=256, A=(512, 512, 512, 512, 1_500), A=(5, 3, 3, 1, 1), A=(1, 2, 3, 1, 1), A=512, A=0, A=1, A=2, A=False, A=3, A=2, A=3, A=None, A=None, **A, ):
'''simple docstring'''
super().__init__(**A, pad_token_id=A, bos_token_id=A, eos_token_id=A )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = feat_extract_norm
SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
SCREAMING_SNAKE_CASE : List[str] = list(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = list(A )
SCREAMING_SNAKE_CASE : Any = list(A )
SCREAMING_SNAKE_CASE : Any = conv_bias
SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE : List[Any] = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim )
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_act
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout
SCREAMING_SNAKE_CASE : List[Any] = attention_dropout
SCREAMING_SNAKE_CASE : int = activation_dropout
SCREAMING_SNAKE_CASE : Tuple = feat_proj_dropout
SCREAMING_SNAKE_CASE : List[str] = final_dropout
SCREAMING_SNAKE_CASE : Tuple = layerdrop
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : int = initializer_range
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : int = do_stable_layer_norm
SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE : List[str] = apply_spec_augment
SCREAMING_SNAKE_CASE : Union[str, Any] = mask_time_prob
SCREAMING_SNAKE_CASE : Any = mask_time_length
SCREAMING_SNAKE_CASE : int = mask_time_min_masks
SCREAMING_SNAKE_CASE : int = mask_feature_prob
SCREAMING_SNAKE_CASE : int = mask_feature_length
SCREAMING_SNAKE_CASE : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE : Optional[int] = num_codevectors_per_group
SCREAMING_SNAKE_CASE : int = num_codevector_groups
SCREAMING_SNAKE_CASE : str = contrastive_logits_temperature
SCREAMING_SNAKE_CASE : Optional[int] = feat_quantizer_dropout
SCREAMING_SNAKE_CASE : Tuple = num_negatives
SCREAMING_SNAKE_CASE : Optional[int] = codevector_dim
SCREAMING_SNAKE_CASE : Optional[int] = proj_codevector_dim
SCREAMING_SNAKE_CASE : List[str] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE : Optional[Any] = ctc_loss_reduction
SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE : str = add_adapter
SCREAMING_SNAKE_CASE : int = adapter_kernel_size
SCREAMING_SNAKE_CASE : str = adapter_stride
SCREAMING_SNAKE_CASE : Optional[int] = num_adapter_layers
SCREAMING_SNAKE_CASE : List[str] = output_hidden_size or hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : List[Any] = list(A )
SCREAMING_SNAKE_CASE : int = list(A )
SCREAMING_SNAKE_CASE : Optional[Any] = list(A )
SCREAMING_SNAKE_CASE : Dict = xvector_output_dim
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul, self.conv_stride, 1 )
| 246
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = value
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tree
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model'}
a = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
a = {'bert_for_seq_generation': 512}
class lowercase_ ( lowercase_ ):
'''simple docstring'''
UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Tuple = []
UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Optional[int]="<pad>" , _UpperCAmelCase : Tuple="<::::>" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Optional[Any] , ):
_A = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sep_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_A = vocab_file
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
@property
def lowerCAmelCase_ ( self : Tuple ):
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self : Optional[int] , _UpperCAmelCase : Dict ):
_A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_A = {}
_A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
return self.sp_model.piece_to_id(__UpperCamelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] ):
_A = self.sp_model.IdToPiece(__UpperCamelCase )
return token
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int ):
_A = []
_A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCamelCase ) + token
_A = []
else:
current_sub_tokens.append(__UpperCamelCase )
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
| 315
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : int = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''marian'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self :int , __UpperCamelCase :Any=5_81_01 , __UpperCamelCase :int=None , __UpperCamelCase :Union[str, Any]=10_24 , __UpperCamelCase :Union[str, Any]=12 , __UpperCamelCase :str=40_96 , __UpperCamelCase :int=16 , __UpperCamelCase :int=12 , __UpperCamelCase :Optional[Any]=40_96 , __UpperCamelCase :Optional[Any]=16 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :Dict=0.0 , __UpperCamelCase :str=True , __UpperCamelCase :Optional[int]=True , __UpperCamelCase :Any="gelu" , __UpperCamelCase :Any=10_24 , __UpperCamelCase :List[Any]=0.1 , __UpperCamelCase :Optional[Any]=0.0 , __UpperCamelCase :Union[str, Any]=0.0 , __UpperCamelCase :Tuple=0.02 , __UpperCamelCase :List[str]=5_81_00 , __UpperCamelCase :str=False , __UpperCamelCase :Optional[int]=5_81_00 , __UpperCamelCase :List[Any]=0 , __UpperCamelCase :List[str]=0 , __UpperCamelCase :Dict=True , **__UpperCamelCase :Tuple , ):
A = vocab_size
A = decoder_vocab_size or vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class _UpperCAmelCase ( lowercase_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
| 292
| 0
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369
|
def rank_of_matrix( matrix ) -> int:
    '''simple docstring'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
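    # Hedged example (added): the second row is twice the first, so this
    # 3x3 matrix should have rank 2.
    print(rank_of_matrix([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]]))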
| 165
| 0
|
def add( first: int, second: int ) -> int:
    '''
    Add two non-negative integers using only bitwise operators.

    >>> add(3, 5)
    8
    >>> add(0, 7)
    7
    '''
    while second != 0:
        # carry bits, sum without carry, then shift the carry into position
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("""Enter the first number: """).strip())
_SCREAMING_SNAKE_CASE = int(input("""Enter the second number: """).strip())
print(F'''{add(first, second) = }''')
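# Worked trace of the carry loop for add(3, 5), for illustration:
#   first=0b011, second=0b101 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0b000, first=0b1000, second=0
# The loop ends once no carry is left, returning 0b1000 == 8.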
| 343
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
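# Typical invocations, for illustration (the script path is an assumption):
#   python utils/release.py                 # prepare a minor release (vX.Y+1.0)
#   python utils/release.py --patch         # prepare a patch release (vX.Y.Z+1)
#   python utils/release.py --post_release  # move the branch back to a .dev0 version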
| 57
| 0
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
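# Example invocation, for illustration (script name and paths are placeholders):
#   python check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict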
| 370
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
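# Note on the structure above: the `TYPE_CHECKING` branch is only evaluated by
# static type checkers and IDEs, which see the real imports; at runtime the
# `_LazyModule` registered in `sys.modules` resolves the same names lazily.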
| 170
| 0
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 151
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ResNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 151
| 1
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def A ( self : int ):
lowerCAmelCase_ : Any = NllbTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
lowerCAmelCase_ : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowerCAmelCase_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ : int = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def A ( self : int ):
lowerCAmelCase_ : int = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Any = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Any = tempfile.mkdtemp()
lowerCAmelCase_ : Dict = tokenizer_r.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowerCAmelCase_ : str = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(UpperCAmelCase , UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : str = tokenizer_r.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase_ : int = tempfile.mkdtemp()
lowerCAmelCase_ : int = tokenizer_r.save_pretrained(UpperCAmelCase , legacy_format=UpperCAmelCase )
lowerCAmelCase_ : Any = tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase , UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : Tuple = tokenizer_r.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase_ : Dict = tempfile.mkdtemp()
lowerCAmelCase_ : int = tokenizer_r.save_pretrained(UpperCAmelCase , legacy_format=UpperCAmelCase )
lowerCAmelCase_ : Any = tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Any = tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
@require_torch
def A ( self : List[Any] ):
if not self.test_seqaseq:
return
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
lowerCAmelCase_ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase_ : Union[str, Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
lowerCAmelCase_ : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase , tgt_texts=UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
lowerCAmelCase_ : Optional[int] = tokenizer.prepare_seqaseq_batch(
UpperCAmelCase , tgt_texts=UpperCAmelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
lowerCAmelCase_ : Any = tokenizer.prepare_seqaseq_batch(
src_texts=UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , UpperCAmelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def A ( self : Any ):
pass
def A ( self : List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase_ : Optional[int] = [AddedToken("""<special>""" , lstrip=UpperCAmelCase )]
lowerCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : int = tokenizer_r.encode("""Hey this is a <special> token""" )
lowerCAmelCase_ : int = tokenizer_r.encode("""<special>""" , add_special_tokens=UpperCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowerCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ : int = self.tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = tokenizer_p.encode("""Hey this is a <special> token""" )
lowerCAmelCase_ : List[Any] = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
        " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def A ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase )
def A ( self : List[str] ):
self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
lowerCAmelCase_ : Tuple = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
lowerCAmelCase_ : Optional[Any] = self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : Any = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , UpperCAmelCase )
lowerCAmelCase_ : str = 10
lowerCAmelCase_ : str = self.tokenizer(UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
def A ( self : Optional[int] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] )
def A ( self : List[Any] ):
lowerCAmelCase_ : int = tempfile.mkdtemp()
lowerCAmelCase_ : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : List[str] = NllbTokenizer.from_pretrained(UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase )
@require_torch
def A ( self : Optional[Any] ):
lowerCAmelCase_ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCAmelCase_ : int = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
lowerCAmelCase_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def A ( self : List[str] ):
lowerCAmelCase_ : Optional[Any] = self.tokenizer(self.src_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=3 , return_tensors="""pt""" )
lowerCAmelCase_ : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=10 , return_tensors="""pt""" )
lowerCAmelCase_ : List[str] = targets["""input_ids"""]
lowerCAmelCase_ : List[Any] = shift_tokens_right(
UpperCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def A ( self : Tuple ):
lowerCAmelCase_ : int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[25_60_47, 70, 73_56, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_60_57,
} , )
@require_torch
def A ( self : Any ):
lowerCAmelCase_ : str = True
lowerCAmelCase_ : Any = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : str = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
| 355
|
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Converts a T5X checkpoint into a PyTorch checkpoint."""
    # Initialise a PyTorch model from the config.
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Whether the model is an encoder-only model.", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
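# Example invocation, for illustration (paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output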
| 28
| 0
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
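# For example, is_1percent_close(100.5, 100) is True (0.5% off) while
# is_1percent_close(102, 100) is False (2% off); the tolerance absorbs small
# num_bytes differences between dataset builds.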
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    filename = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(filename)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            result == expected
| 246
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
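# For example, with the default scale_factor of 8:
#   downscale_height_and_width(768, 768) == (96, 96)  # 768 is divisible by 8**2
#   downscale_height_and_width(500, 500) == (64, 64)  # 500 // 64 == 7, rounded up to 8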
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
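# The helper above returns a (1, 3, h, w) float tensor scaled to [-1, 1],
# which is the layout `self.movq.encode` expects later in the pipeline.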
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2.

    Args:
        unet: Conditional U-Net used to denoise the image latents.
        scheduler: A `DDPMScheduler` used together with `unet`.
        movq: MoVQ image encoder and decoder.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # Get the original timestep using init_timestep.
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents
def __snake_case ( self , _A=0 ) -> Optional[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_UpperCAmelCase : List[Any] = torch.device(f'''cuda:{gpu_id}''' )
_UpperCAmelCase : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def __snake_case ( self , _A=0 ) -> int:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_UpperCAmelCase : int = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Tuple = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , _A , _A , _A , _A = 5_12 , _A = 5_12 , _A = 1_00 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self._execution_device
_UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(_A , _A ):
_UpperCAmelCase : Dict = torch.cat(_A , dim=0 )
_UpperCAmelCase : Any = image_embeds.shape[0]
if isinstance(_A , _A ):
_UpperCAmelCase : Any = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase : str = image_embeds.repeat_interleave(_A , dim=0 )
_UpperCAmelCase : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
_UpperCAmelCase : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
_UpperCAmelCase : str = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f'''Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
_UpperCAmelCase : Union[str, Any] = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
_UpperCAmelCase : List[Any] = image.to(dtype=image_embeds.dtype , device=_A )
_UpperCAmelCase : int = self.movq.encode(_A )["""latents"""]
_UpperCAmelCase : Dict = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(_A , _A , _A )
_UpperCAmelCase : Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_UpperCAmelCase , _UpperCAmelCase : str = downscale_height_and_width(_A , _A , self.movq_scale_factor )
_UpperCAmelCase : List[Any] = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Union[str, Any] = {"""image_embeds""": image_embeds}
_UpperCAmelCase : str = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = variance_pred.chunk(2 )
_UpperCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : List[Any] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
_UpperCAmelCase : Optional[int] = self.movq.decode(_A , force_not_quantize=_A )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Any = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : List[str] = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
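

# --- Illustrative sketch (not part of the pipeline class above) ---
# The classifier-free-guidance step in the denoising loop splits a doubled
# batch into an unconditional and a conditional half and blends them. This is
# a minimal, hedged reconstruction of that arithmetic on dummy tensors; the
# shapes are assumptions for illustration only.
def _cfg_combine_demo():
    doubled = torch.randn(2, 4, 64, 64)  # [uncond, cond] stacked on the batch dim
    guidance_scale = 4.0
    noise_pred_uncond, noise_pred_text = doubled.chunk(2)
    blended = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    assert blended.shape == (1, 4, 64, 64)
    return blended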
| 246
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
"Salesforce/blip-vqa-capfit-large": (
"https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-base": (
"https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
),
"Salesforce/blip-image-captioning-large": (
"https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
),
"Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
"Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
"Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
"Salesforce/blip-itm-large-flikr": (
"https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
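

# --- Usage sketch ---
# A minimal, hedged example of composing the three config classes above; the
# values shown are the class defaults, not tuned settings.
if __name__ == "__main__":
    text_config = BlipTextConfig()
    vision_config = BlipVisionConfig()
    config = BlipConfig.from_text_vision_configs(text_config, vision_config)
    # __init__ ties the text encoder's cross-attention width to the vision tower:
    print(config.text_config.encoder_hidden_size)  # 768 (= vision hidden_size)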
| 56
|
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # the following common tests do not apply to this character-level tokenizer
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
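

# --- Usage sketch ---
# What the two tests above exercise, as a standalone script; it downloads the
# "junnyu/roformer_chinese_base" checkpoint, so it needs network access.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
    tokens = tokenizer.tokenize("永和服装饰品有限公司,今天天气非常好")
    print(tokens)  # pre-segmented with rjieba: ['永和', '服装', '饰品', ...]
    print(tokenizer.convert_tokens_to_ids(tokens))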
| 56
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
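

# --- Usage sketch ---
# A hedged end-to-end call of the processor above on a random image; the
# 256x256 center crop and the BGR channel flip mirror the defaults from
# __init__, and the expected output shape is an assumption based on them.
if __name__ == "__main__":
    processor = MobileViTImageProcessor()
    image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 256, 256)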
| 262
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
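

# --- Usage sketch ---
# A minimal, hedged example of the two construction paths above: the default
# timm "resnet50" backbone versus an explicit HF ResNet backbone config.
if __name__ == "__main__":
    default_cfg = DetrConfig()  # use_timm_backbone=True, backbone="resnet50"
    print(default_cfg.hidden_size, default_cfg.num_attention_heads)  # 256 8
    resnet_cfg = CONFIG_MAPPING["resnet"](out_features=["stage4"])
    hf_cfg = DetrConfig.from_backbone_config(resnet_cfg, use_timm_backbone=False)
    print(type(hf_cfg.backbone_config).__name__)  # ResNetConfig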
| 165
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _A ( unittest.TestCase ):
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : str = -1
__snake_case : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : int = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ )
__snake_case : Union[str, Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__snake_case : Union[str, Any] = TextStreamer(__magic_name__ )
model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ , streamer=__magic_name__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : Dict = cs.out[:-1]
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : Any = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : Optional[Any] = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ )
__snake_case : Dict = tokenizer.decode(greedy_ids[0] )
__snake_case : List[Any] = TextIteratorStreamer(__magic_name__ )
__snake_case : Dict = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__snake_case : List[str] = Thread(target=model.generate , kwargs=__magic_name__ )
thread.start()
__snake_case : Dict = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : int = -1
__snake_case : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : Optional[int] = model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ )
__snake_case : Any = greedy_ids[:, input_ids.shape[1] :]
__snake_case : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__snake_case : str = TextStreamer(__magic_name__ , skip_prompt=__magic_name__ )
model.generate(__magic_name__ , max_new_tokens=10 , do_sample=__magic_name__ , streamer=__magic_name__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__snake_case : str = cs.out[:-1]
self.assertEqual(__magic_name__ , __magic_name__ )
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : List[str] = AutoTokenizer.from_pretrained("""distilgpt2""" )
__snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__magic_name__ )
__snake_case : Any = -1
__snake_case : Union[str, Any] = torch.ones((1, 5) , device=__magic_name__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__snake_case : Any = TextStreamer(__magic_name__ , skip_special_tokens=__magic_name__ )
model.generate(__magic_name__ , max_new_tokens=1 , do_sample=__magic_name__ , streamer=__magic_name__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__snake_case : List[Any] = cs.out[:-1] # Remove the final "\n"
__snake_case : str = tokenizer(__magic_name__ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowercase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__magic_name__ )
__snake_case : Union[str, Any] = -1
__snake_case : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__magic_name__ )
__snake_case : str = TextIteratorStreamer(__magic_name__ , timeout=0.001 )
__snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
__snake_case : int = Thread(target=model.generate , kwargs=__magic_name__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__magic_name__ ):
__snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
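

# --- Usage sketch ---
# The consumer pattern the iterator tests above rely on: generation runs in a
# background thread while the main thread drains the streamer. The tiny test
# checkpoint is reused here so the sketch stays cheap to run.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for piece in streamer:  # yields decoded text chunks as tokens arrive
        print(piece, end="", flush=True)
    thread.join()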
| 13
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
__snake_case : Dict = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def lowercase__ ( self : str , **__magic_name__ : List[Any] ) -> CanineTokenizer:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
__snake_case : Optional[Any] = 10_24
return tokenizer
@require_torch
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[Any] = self.canine_tokenizer
__snake_case : List[str] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__snake_case : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
__snake_case : str = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
self.assertIsInstance(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__snake_case : Any = self.canine_tokenizer
__snake_case : List[Any] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__snake_case : Tuple = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertIn("""token_type_ids""" , __magic_name__ )
@require_torch
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.canine_tokenizer
__snake_case : Optional[Any] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__snake_case : Any = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Dict = tempfile.mkdtemp()
__snake_case : str = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : Dict = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
__snake_case : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : List[str] = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : Optional[int] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__snake_case : List[Any] = chr(0xE007 )
additional_special_tokens.append(__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__snake_case : List[str] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
__snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(__magic_name__ )
__snake_case : int = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn(__magic_name__ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : str = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case , __snake_case : Any = self.get_clean_sequence(__magic_name__ )
# a special token for Canine can be defined as follows:
__snake_case : Tuple = 0xE005
__snake_case : Tuple = chr(__magic_name__ )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__snake_case : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
__snake_case : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__magic_name__ )
__snake_case : Tuple = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , input_encoded + special_token_id )
__snake_case : Tuple = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : Any = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Dict = chr(0xE005 )
__snake_case : str = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__magic_name__ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__snake_case : Tuple = tokenizer.tokenize(__magic_name__ )
__snake_case : Any = tokenizer.tokenize(__magic_name__ )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(len(__magic_name__ ) , 1 )
self.assertEqual(token_a[0] , __magic_name__ )
self.assertEqual(token_a[0] , __magic_name__ )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__snake_case : str = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__snake_case : Optional[Any] = 0xE006
__snake_case : List[str] = chr(__magic_name__ )
__snake_case : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__magic_name__ )
tokenizer.from_pretrained(__magic_name__ )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Any = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Tuple = json.load(__magic_name__ )
# a special token for Canine can be defined as follows:
__snake_case : Tuple = 0xE006
__snake_case : int = chr(__magic_name__ )
__snake_case : List[Any] = [new_token_a]
__snake_case : Union[str, Any] = [new_token_a]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Tuple = tokenizer_class.from_pretrained(__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__snake_case : Any = 0xE007
__snake_case : Any = chr(__magic_name__ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Dict = [AddedToken(__magic_name__ , lstrip=__magic_name__ )]
__snake_case : Union[str, Any] = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , extra_ids=0 )
self.assertIn(__magic_name__ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = self.get_tokenizers(do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : List[str] = """hello world"""
if self.space_between_special_tokens:
__snake_case : Union[str, Any] = """[CLS] hello world [SEP]"""
else:
__snake_case : List[Any] = input
__snake_case : int = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
__snake_case : Any = tokenizer.decode(__magic_name__ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__magic_name__ , [output, output.lower()] )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__snake_case : Dict = """a"""
__snake_case : Tuple = ord(__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
__snake_case : Dict = 0xE006
__snake_case : str = chr(__magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
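

# --- Usage sketch ---
# The property these tests lean on: CANINE tokenizes straight to Unicode code
# points, so "special tokens" are just characters from a private use area
# (CLS = chr(0xE000) = 57344, SEP = chr(0xE001) = 57345).
if __name__ == "__main__":
    tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
    encoding = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    print(encoding.input_ids[0][:5])  # tensor([57344, 76, 105, 102, 101])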
| 13
| 1
|
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
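
    # Worked example: with an all-negative input the flag matters. The best
    # non-empty subarray is the single largest element, while an empty
    # subarray is allowed to sum to 0.
    assert max_subarray_sum([-5, -2, -9]) == -2
    assert max_subarray_sum([-5, -2, -9], allow_empty_subarrays=True) == 0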
| 38
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] =["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq)
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
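

# --- Illustrative sketch ---
# The shape of the learning-rate schedule that scheduler_fn builds above:
# linear warmup up to `lr`, then linear decay toward ~0. The step counts here
# are tiny illustrative numbers, not the training defaults.
if __name__ == "__main__":
    sched = optax.join_schedules(
        schedules=[
            optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100),
            optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900),
        ],
        boundaries=[100],
    )
    print(float(sched(0)), float(sched(100)), float(sched(1000)))  # 0.0 3e-05 ~1e-07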
| 356
|
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
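
    # Quick sanity checks of the function above; selection sort is quadratic,
    # so it is only meant for small inputs like this REPL demo.
    assert selection_sort([3, 1, 2]) == [1, 2, 3]
    assert selection_sort([]) == []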
| 204
| 0
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
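# Added sketch of how HfArgumentParser maps dataclass fields to CLI flags. ToyArgs
# below is hypothetical, not the real TensorFlowBenchmarkArguments.
def _demo_hf_argument_parser():
    from dataclasses import dataclass, field
    @dataclass
    class ToyArgs:
        models: str = field(default="bert-base-uncased")
        batch_size: int = field(default=8)
    (toy_args,) = HfArgumentParser(ToyArgs).parse_args_into_dataclasses(
        args=["--models", "gpt2", "--batch_size", "4"])
    return toy_args  # ToyArgs(models='gpt2', batch_size=4)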
| 142
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """simple docstring"""
    prefix = ''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """simple docstring"""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """simple docstring"""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1_000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename)
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : int = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
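# Added self-contained sketch (toy shapes, hypothetical keys) of the two key moves
# above: renaming a checkpoint key in place, and slicing a fused in_proj matrix
# into query/key/value blocks.
def _demo_rename_and_split():
    toy_sd = OrderedDict({"transformer.encoder.norm.weight": torch.randn(4),
                          "in_proj_weight": torch.randn(768, 256)})
    toy_sd["encoder.layernorm.weight"] = toy_sd.pop("transformer.encoder.norm.weight")
    q, k, v = toy_sd["in_proj_weight"].chunk(3, dim=0)  # three 256-row blocks
    return sorted(toy_sd.keys()), q.shape  # torch.Size([256, 256])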
| 28
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : Tuple ) ->Optional[int]:
lowerCamelCase__ : Tuple = 0
def __lowerCamelCase ( self : List[str] ) ->int:
lowerCamelCase__ : int = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(A , A )
def __lowerCamelCase ( self : Optional[Any] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Tuple = Path(A ) / '''preprocessor_config.json'''
lowerCamelCase__ : Union[str, Any] = Path(A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A , '''w''' ) )
lowerCamelCase__ : Dict = AutoImageProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def __lowerCamelCase ( self : str ) ->Union[str, Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : str = Path(A ) / '''preprocessor_config.json'''
lowerCamelCase__ : Optional[int] = Path(A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A , '''w''' ) )
lowerCamelCase__ : str = AutoImageProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : List[Any] = CLIPConfig()
# Create a dummy config file with image_processor_type
lowerCamelCase__ : Optional[Any] = Path(A ) / '''preprocessor_config.json'''
lowerCamelCase__ : str = Path(A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCamelCase__ : str = AutoImageProcessor.from_pretrained(A ).to_dict()
config_dict.pop('''image_processor_type''' )
lowerCamelCase__ : Tuple = CLIPImageProcessor(**A )
# save in new folder
model_config.save_pretrained(A )
config.save_pretrained(A )
lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A )
# make sure private variable is not incorrectly saved
lowerCamelCase__ : int = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(A , A )
def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : str = Path(A ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(A , '''w''' ) , )
lowerCamelCase__ : List[str] = AutoImageProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
def __lowerCamelCase ( self : List[Any] ) ->Dict:
with self.assertRaisesRegex(
A , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowerCamelCase__ : List[str] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCamelCase ( self : Union[str, Any] ) ->str:
with self.assertRaisesRegex(
A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained(A , revision='''aaaaaa''' )
def __lowerCamelCase ( self : Dict ) ->Optional[Any]:
with self.assertRaisesRegex(
A , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowerCamelCase__ : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCamelCase ( self : Union[str, Any] ) ->List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A ):
lowerCamelCase__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A ):
lowerCamelCase__ : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A )
lowerCamelCase__ : Tuple = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A )
lowerCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained(A , trust_remote_code=A )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowerCamelCase ( self : int ) ->List[Any]:
try:
AutoConfig.register('''custom''' , A )
AutoImageProcessor.register(A , A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A ):
AutoImageProcessor.register(A , A )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : Any = Path(A ) / '''preprocessor_config.json'''
lowerCamelCase__ : Union[str, Any] = Path(A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(A , '''w''' ) )
lowerCamelCase__ : Tuple = CustomImageProcessor.from_pretrained(A )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A )
lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained(A )
self.assertIsInstance(A , A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Union[str, Any] ) ->Dict:
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = True
try:
AutoConfig.register('''custom''' , A )
AutoImageProcessor.register(A , A )
# If remote code is not set, the default is to use local
lowerCamelCase__ : List[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase__ : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase__ : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(A , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
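# Added sketch of the local-loading path these tests exercise: a folder containing
# only a preprocessor_config.json that names the class is enough for
# AutoImageProcessor.from_pretrained.
def _demo_local_image_processor():
    with tempfile.TemporaryDirectory() as tmpdirname:
        config_path = Path(tmpdirname) / 'preprocessor_config.json'
        config_path.write_text(json.dumps({'image_processor_type': 'CLIPImageProcessor'}))
        return AutoImageProcessor.from_pretrained(tmpdirname)  # -> CLIPImageProcessor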
| 265
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
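# Added: the same core set operations in plain NumPy for readers without
# scikit-fuzzy (memberships below are hand-rolled triangles on the same X).
np_young = np.clip(1 - np.abs(X - 25) / 25, 0, 1)
np_middle = np.clip(1 - np.abs(X - 50) / 25, 0, 1)
np_union = np.maximum(np_young, np_middle)         # max(µA(x), µB(x))
np_intersection = np.minimum(np_young, np_middle)  # min(µA(x), µB(x))
np_complement_a = 1 - np_young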
| 265
| 1
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    '''simple docstring'''
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    '''simple docstring'''
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    '''simple docstring'''
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 56
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a : Dict = None
a : List[Any] = logging.get_logger(__name__)
a : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a : str = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a : List[Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class a ( _lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = TaTokenizer
snake_case_ = []
def __init__( self : List[Any] , lowercase_ : int=None , lowercase_ : Dict=None , lowercase_ : Dict="</s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : int="<pad>" , lowercase_ : int=100 , lowercase_ : List[Any]=None , **lowercase_ : List[str] , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"<extra_id_{i}>" for i in range(lowercase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
snake_case_ = len(set(filter(lambda lowercase_ : bool('''extra_id_''' in str(lowercase_ ) ) , lowercase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
snake_case_ = extra_ids
@staticmethod
def A_ ( lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : int ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
snake_case_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , lowercase_ , )
return max_model_length
def A_ ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
logger.info(F"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: re.search(r"<extra_id_\d+>", token) is not None, self.additional_special_tokens)))
    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
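# Added usage sketch of sentinel handling via the public transformers API
# (downloads t5-small's tokenizer files on first run).
def _demo_t5_sentinels():
    from transformers import AutoTokenizer
    tok = AutoTokenizer.from_pretrained('t5-small')
    ids = tok('The <extra_id_0> walks in <extra_id_1> park').input_ids
    assert ids[-1] == tok.eos_token_id  # EOS is appended by build_inputs_with_special_tokens
    return tok.convert_tokens_to_ids('<extra_id_0>')  # sentinel ids sit at the top of the vocab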
| 56
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
_A = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
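# Added minimal sketch of the lazy-import pattern _LazyModule implements:
# attribute access triggers the real import only on first use.
import importlib
import types
class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value
_lazy_demo = _ToyLazyModule('lazy_demo', {'math': ['sqrt']})
assert _lazy_demo.sqrt(9.0) == 3.0  # math is imported here, on first access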
| 261
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_tensor = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_tensor, torch_tensor)}')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
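# Added standalone trace of the name translation above (no TensorFlow needed).
# Note that replacement order matters: 'LayerNorm/weight' must map to gamma
# before the generic 'weight' -> 'kernel' rule fires.
def _demo_to_tf_var_name(name: str) -> str:
    for patt, repl in (
        ("layer.", "layer_"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    ):
        name = name.replace(patt, repl)
    return f'bert/{name}'
assert _demo_to_tf_var_name('encoder.layer.0.output.LayerNorm.weight') == 'bert/encoder/layer_0/output/LayerNorm/gamma'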
| 261
| 1
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: Optional[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = -1
SCREAMING_SNAKE_CASE_: Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: List[Any] = TextStreamer(lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: int = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Any = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = -1
SCREAMING_SNAKE_CASE_: Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = tokenizer.decode(greedy_ids[0])
SCREAMING_SNAKE_CASE_: int = TextIteratorStreamer(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE_: Optional[int] = Thread(target=model.generate , kwargs=lowerCAmelCase__)
thread.start()
SCREAMING_SNAKE_CASE_: int = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = -1
SCREAMING_SNAKE_CASE_: Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE_: str = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Dict = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Union[str, Any] = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
SCREAMING_SNAKE_CASE_: List[str] = AutoTokenizer.from_pretrained("distilgpt2")
SCREAMING_SNAKE_CASE_: str = AutoModelForCausalLM.from_pretrained("distilgpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = -1
SCREAMING_SNAKE_CASE_: Tuple = torch.ones((1, 5) , device=lowerCAmelCase__).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Optional[Any] = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE_: List[str] = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE_: Any = tokenizer(lowerCAmelCase__ , return_tensors="pt")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = -1
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001)
SCREAMING_SNAKE_CASE_: Optional[int] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE_: List[Any] = Thread(target=model.generate , kwargs=lowerCAmelCase__)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: List[Any] = ""
for new_text in streamer:
streamer_text += new_text
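# Added minimal usage sketch of TextIteratorStreamer outside a test harness
# (downloads the tiny test model on first run; generation runs on a worker thread
# while the main thread consumes text chunks).
def _demo_streaming():
    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tok("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tok)
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
    return "".join(chunk for chunk in streamer)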
| 13
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
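# Added short trace of the state machine exercised above: updates walk the shared
# prefix of both branches until one of them completes.
def _demo_disjunctive_trace():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    history = []
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
        history.append((token, list(dc.current_seq), completed))
    return history  # last entry: (4, [1, 2, 4], True)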
| 13
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Optional[Any] = '''blip_2_vision_model'''
def __init__( self, A=1_408, A=6_144, A=39, A=16, A=224, A=14, A="gelu", A=0.0_00_01, A=0.0, A=1E-10, A=True, **A, ):
'''simple docstring'''
super().__init__(**A )
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : int = attention_dropout
SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
cls._set_token_in_kwargs(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = cls.get_config_dict(A, **A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(A, **A )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = '''blip_2_qformer'''
def __init__( self, A=30_522, A=768, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=512, A=0.02, A=1E-12, A=0, A="absolute", A=2, A=1_408, **A, ):
'''simple docstring'''
super().__init__(pad_token_id=A, **A )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = position_embedding_type
SCREAMING_SNAKE_CASE : Any = cross_attention_frequency
SCREAMING_SNAKE_CASE : List[Any] = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
cls._set_token_in_kwargs(A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = cls.get_config_dict(A, **A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
SCREAMING_SNAKE_CASE : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(A, **A )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''blip-2'''
A : Optional[int] = True
def __init__( self, A=None, A=None, A=None, A=32, **A ):
'''simple docstring'''
super().__init__(**A )
if vision_config is None:
SCREAMING_SNAKE_CASE : Tuple = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
SCREAMING_SNAKE_CASE : List[str] = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
SCREAMING_SNAKE_CASE : Any = BlipaVisionConfig(**A )
SCREAMING_SNAKE_CASE : Optional[Any] = BlipaQFormerConfig(**A )
SCREAMING_SNAKE_CASE : List[str] = text_config['model_type'] if 'model_type' in text_config else 'opt'
SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[text_model_type](**A )
SCREAMING_SNAKE_CASE : Dict = self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE : List[Any] = self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE : Optional[Any] = num_query_tokens
SCREAMING_SNAKE_CASE : Dict = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = 1.0
SCREAMING_SNAKE_CASE : Tuple = 0.02
@classmethod
def UpperCamelCase_ ( cls, A, A, A, **A, ):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **A, )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : int = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE : str = self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE : List[str] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type
return output
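# Added composition sketch; the obfuscated Blipa* names above correspond to the
# public Blip2* classes in transformers (assumed mapping).
def _demo_blip2_config():
    from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
    config = Blip2Config.from_vision_qformer_text_configs(
        vision_config=Blip2VisionConfig(),
        qformer_config=Blip2QFormerConfig(),
        text_config=OPTConfig(),
    )
    return config.num_query_tokens, config.text_config.model_type  # (32, 'opt')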
| 246
|
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """simple docstring"""
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """simple docstring"""
    return 2 * x
def get_initial_point(a: float) -> float:
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """simple docstring"""
    if a < 0:
        raise ValueError('math domain error')
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
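# Added quick check of the iterative root (square_root_iterative, as named above)
# against math.sqrt.
for _a in (2.0, 10.0, 1e6):
    assert math.isclose(square_root_iterative(_a), math.sqrt(_a), rel_tol=1e-9)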
| 246
| 1
|
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    '''simple docstring'''
    def __init__(self, key: int = 0):
        '''simple docstring'''
        self.__key = key
    def encrypt(self, content: str, key: int) -> list[str]:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def decrypt(self, content: str, key: int) -> list[str]:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string(self, content: str, key: int = 0) -> str:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def decrypt_string(self, content: str, key: int = 0) -> str:
        '''simple docstring'''
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ''
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file(self, file: str, key: int = 0) -> bool:
        '''simple docstring'''
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open('encrypt.out', 'w+') as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True
    def decrypt_file(self, file: str, key: int) -> bool:
        '''simple docstring'''
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open('decrypt.out', 'w+') as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
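# Added round-trip sanity check for the cipher above: XOR with the same key is
# its own inverse.
_crypt = XORCipher()
_secret = _crypt.encrypt_string('hallo welt', 67)
assert _crypt.decrypt_string(_secret, 67) == 'hallo welt'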
| 2
|
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    '''simple docstring'''
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
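# Added worked example for present_value above: three yearly flows of 100 at a
# 10% discount rate. The flow at t=0 is undiscounted, so the total is
# 100 + 100/1.1 + 100/1.21 = 273.55.
assert present_value(0.10, [100.0, 100.0, 100.0]) == 273.55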
| 204
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_, self.block_out_channels_ * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
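
# The fast tests above run on CPU under pytest; a typical invocation (file path illustrative):
#     pytest tests/pipelines/kandinsky_v22/test_kandinsky_inpaint.py -k FastTests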
| 369
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix from a remote URI, e.g. "s3://bucket/dir" -> "bucket/dir"."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local "file" filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Move `src` to `dst` on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's internal event loop, IO thread and lock references after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
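
# Illustrative behaviour of the helpers above ("memory" is a built-in fsspec protocol):
#     extract_path_from_uri("s3://my-bucket/my-dataset")   # -> "my-bucket/my-dataset"
#     is_remote_filesystem(fsspec.filesystem("memory"))    # -> True
#     is_remote_filesystem(fsspec.filesystem("file"))      # -> False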
| 75
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
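
# A minimal usage sketch, assuming this module ships inside an installed `transformers`
# package that exposes the class as `transformers.FocalNetConfig`:
#     from transformers import FocalNetConfig
#     config = FocalNetConfig(image_size=192)
#     config.stage_names  # -> ["stem", "stage1", "stage2", "stage3", "stage4"]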
| 265
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
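
# This script is meant to be launched across processes with the accelerate CLI,
# e.g. (command illustrative): accelerate launch --num_processes 2 test_ops.py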
| 265
| 1
|
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 250
|
def check_cycle(graph: dict) -> bool:
    """
    Return True if the directed graph (an adjacency dict) contains a cycle.

    >>> check_cycle({0: [1], 1: [2], 2: [0]})
    True
    >>> check_cycle({0: [1], 1: [2], 2: []})
    False
    """
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur over `vertex`'s neighbours; a neighbour already on the recursion stack is a back edge."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
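    # A quick demonstration on two small adjacency lists:
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True: back edge 2 -> 0
    print(check_cycle({0: [1], 1: [2], 2: []}))   # False: no cycle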
| 250
| 1