| code (stringlengths 86–54.5k) | code_codestyle (int64 0–371) | style_context (stringlengths 87–49.2k) | style_context_codestyle (int64 0–349) | label (int64 0–1) |
|---|---|---|---|---|
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Return the root-mean-square (RMS) speed of a molecule in m/s,
    v_rms = sqrt(3 * R * T / M), with T in kelvin and M in kg/mol.

    >>> round(rms_speed_of_molecule(300, 0.028), 1)
    517.0
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen (N2) at 300 K; the molar mass must be given in kg/mol
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| code_codestyle: 75 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| style_context_codestyle: 75 | label: 1 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| code_codestyle: 368 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| style_context_codestyle: 144 | label: 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
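# Example invocation (mirrors the argparse defaults above; the script filename
# and output folder are illustrative):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2_tiny_patch4_window8_256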
| code_codestyle: 344 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
 Text data.
 Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| style_context_codestyle: 344 | label: 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| code_codestyle: 353 |
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in a base between 2 and 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")

    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")

    if base in (0, 1):
        raise ValueError("base must be >= 2")

    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
| style_context_codestyle: 276 | label: 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
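# Usage sketch (the dataset repo id is hypothetical; `cast_column` is the
# standard `datasets` entry point for this feature):
#
#   from datasets import load_dataset, Audio
#
#   ds = load_dataset("some-user/some-audio-dataset", split="train")  # hypothetical repo id
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # decode + resample on access
#   sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}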
| code_codestyle: 209 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to normalize CLIP embeddings."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
| style_context_codestyle: 286 | label: 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the given target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> Any:
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
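# Usage sketch (assumes the `facebook/s2t-small-librispeech-asr` checkpoint
# referenced above is available on the Hub):
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   text = tokenizer.decode(ids, skip_special_tokens=True)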
| code_codestyle: 206 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
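# Usage sketch (standard diffusers pattern, not part of this test file; the
# checkpoint id below is illustrative):
#
#   from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)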
| style_context_codestyle: 206 | label: 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| code_codestyle: 110 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 110 | label: 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor: extracts Kaldi-compliant log-mel filter-bank features."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray):
        # Get mel filter-bank features using TorchAudio's Kaldi-compliant fbank
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ):
        # utterance-level cepstral mean and variance normalization over the valid frames
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
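
# --- Added illustrative sketch (not part of the original file; helper name is hypothetical) ---
# `utterance_cmvn` above normalizes each mel bin to zero mean / unit variance over the
# valid frames. A minimal numpy equivalent:
def _cmvn_sketch():
    feats = np.random.randn(100, 80).astype(np.float32)  # 100 frames, 80 mel bins
    normed = (feats - feats.mean(axis=0)) / feats.std(axis=0)
    # per-bin mean is ~0 and std is ~1 after normalization
    assert np.allclose(normed.mean(axis=0), 0.0, atol=1e-5)
    assert np.allclose(normed.std(axis=0), 1.0, atol=1e-5)
    return normed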
| 119
|
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image

from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
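
# --- Added usage note (not part of the original script; the file name below is hypothetical) ---
# The script performs no quantization itself: it loads an INT8 UNet from `best_model.pt` inside
# the model directory if one exists (e.g. produced by Intel Neural Compressor), else runs the
# FP32 UNet on CUDA. Example invocation:
#   python text2images.py -m ./sd-model-dir -c "robotic cat with wings" -n 4 -s 42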
| 119
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 103
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
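
# --- Added usage sketch (not part of the original script; the path below is hypothetical) ---
# The dump written above is a pickled list of numpy integer arrays and can be read back with:
#   with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#       token_id_arrays = pickle.load(fp)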
| 144
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )
        with torch.no_grad():
            outputs = model(**inputs)
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
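
# --- Added illustrative sketch (not from the original tests; helper name is hypothetical) ---
# VideoMAE pre-training expects `bool_masked_pos` of shape (batch, seq_length) with the same
# number of masked patches per video, exactly as built inline in the tests above:
def _make_bool_masked_pos(batch_size, seq_length, num_masks):
    mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
    return mask.expand(batch_size, -1).bool()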
| 57
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 57
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 18
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 276
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 368
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
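
# --- Added usage sketch (not from the original file; `model` is hypothetical) ---
# `set_partitions` maps every leaf of a GPT-J-style parameter pytree to a PartitionSpec
# (or None for replication) via the regex rules above:
#   param_specs = set_partitions(model.params)  # FrozenDict with PartitionSpec/None leaves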
| 23
| 0
|
'''simple docstring'''
import math
def res(x, y):
    """Return y * log10(x), so that huge powers can be compared without overflow."""
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
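
# --- Added worked example (not part of the original script) ---
# res() compares huge powers without materializing them: for 2^100 vs 10^30,
# res(2, 100) = 100 * log10(2) ≈ 30.103 while res(10, 30) = 30.0, so 2^100 is larger.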
| 206
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def a ( lowerCamelCase__=None ):
'''simple docstring'''
A_ : int = argparse.ArgumentParser(add_help=lowerCamelCase__ , allow_abbrev=lowerCamelCase__ )
# The main config parser
A_ : int = config_command_parser(lowerCamelCase__ )
# The subparser to add commands to
A_ : int = config_parser.add_subparsers(title="""subcommands""" , dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(lowerCamelCase__ , parents=[parent_parser] )
update_command_parser(lowerCamelCase__ , parents=[parent_parser] )
return config_parser
def a ( ):
'''simple docstring'''
A_ : Optional[int] = get_config_parser()
A_ : List[str] = config_parser.parse_args()
if not hasattr(lowerCamelCase__ , """func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 206
| 1
|
def valid_connection(graph, next_ver, curr_ind, path) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph, start_index=0) -> list[int]:
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
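
# --- Added usage sketch (not from the original file; helper name is hypothetical) ---
def _hamilton_demo():
    # 5-vertex adjacency matrix with the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    return hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]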
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323
| 0
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print('''Good Bye...''')
| 119
|
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data"
    )
    parser.add_argument("--evaluation_set", type=str, help="where to store parsed evaluation_set file")
    parser.add_argument("--gold_data_path", type=str, help="where to store parsed gold_data_path file")
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 119
| 1
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 368
|
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 12
| 0
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 57
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
super().__init__(
eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
__lowerCAmelCase = extra_ids
__lowerCAmelCase = 2**8 # utf is 8 bits
# define special tokens dict
__lowerCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__lowerCAmelCase = len(self.special_tokens_encoder )
__lowerCAmelCase = len(__a )
for i, token in enumerate(__a ):
__lowerCAmelCase = self.vocab_size + i - n
__lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a )) + [1]
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def snake_case ( self , __a ):
if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = self._add_eos_if_not_present(__a )
if token_ids_a is None:
return token_ids_a
else:
__lowerCAmelCase = self._add_eos_if_not_present(__a )
return token_ids_a + token_ids_a
def snake_case ( self , __a ):
__lowerCAmelCase = [chr(i ) for i in text.encode("utf-8" )]
return tokens
def snake_case ( self , __a ):
if token in self.special_tokens_encoder:
__lowerCAmelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__lowerCAmelCase = self.added_tokens_encoder[token]
elif len(__a ) != 1:
__lowerCAmelCase = self.unk_token_id
else:
__lowerCAmelCase = ord(__a ) + self._num_special_tokens
return token_id
def snake_case ( self , __a ):
if index in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[index]
else:
__lowerCAmelCase = chr(index - self._num_special_tokens )
return token
def snake_case ( self , __a ):
__lowerCAmelCase = B""
for token in tokens:
if token in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
else:
__lowerCAmelCase = bytes([ord(__a )] )
bstring += tok_string
__lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
return string
def snake_case ( self , __a , __a = None ):
return ()
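# --- Added editor sketch -----------------------------------------------------
# The byte-level encode/decode round trip implemented by the tokenizer above,
# isolated for clarity. The class reserves ids 0-2 for pad/eos/unk (see
# special_tokens_encoder), so a raw byte value b maps to token id b + 3.
NUM_SPECIAL_TOKENS = 3  # pad, eos, unk

def _encode_bytes(text):
    return [b + NUM_SPECIAL_TOKENS for b in text.encode("utf-8")]

def _decode_bytes(ids):
    return bytes(i - NUM_SPECIAL_TOKENS for i in ids).decode("utf-8", errors="ignore")

assert _decode_bytes(_encode_bytes("héllo")) == "héllo"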
| 57
| 1
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _snake_case ( nn.Module):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : jnp.dtype =jnp.floataa
def A__ ( self : List[Any] ):
lowercase__ = []
lowercase__ = []
for i in range(self.num_layers ):
lowercase__ = self.in_channels if i == 0 else self.out_channels
lowercase__ = FlaxResnetBlockaD(
in_channels=__lowercase, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__lowercase )
lowercase__ = FlaxTransformeraDModel(
in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(__lowercase )
lowercase__ = resnets
lowercase__ = attentions
if self.add_downsample:
lowercase__ = FlaxDownsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self : List[Any], __lowercase : Tuple, __lowercase : Union[str, Any], __lowercase : Any, __lowercase : Tuple=True ):
lowercase__ = ()
for resnet, attn in zip(self.resnets, self.attentions ):
lowercase__ = resnet(__lowercase, __lowercase, deterministic=__lowercase )
lowercase__ = attn(__lowercase, __lowercase, deterministic=__lowercase )
output_states += (hidden_states,)
if self.add_downsample:
lowercase__ = self.downsamplers_a(__lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : jnp.dtype =jnp.floataa
def A__ ( self : int ):
lowercase__ = []
for i in range(self.num_layers ):
lowercase__ = self.in_channels if i == 0 else self.out_channels
lowercase__ = FlaxResnetBlockaD(
in_channels=__lowercase, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__lowercase )
lowercase__ = resnets
if self.add_downsample:
lowercase__ = FlaxDownsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self : Optional[int], __lowercase : Any, __lowercase : int, __lowercase : int=True ):
lowercase__ = ()
for resnet in self.resnets:
lowercase__ = resnet(__lowercase, __lowercase, deterministic=__lowercase )
output_states += (hidden_states,)
if self.add_downsample:
lowercase__ = self.downsamplers_a(__lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class _snake_case ( nn.Module):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : jnp.dtype =jnp.floataa
def A__ ( self : Optional[int] ):
lowercase__ = []
lowercase__ = []
for i in range(self.num_layers ):
lowercase__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase__ = self.prev_output_channel if i == 0 else self.out_channels
lowercase__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__lowercase )
lowercase__ = FlaxTransformeraDModel(
in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(__lowercase )
lowercase__ = resnets
lowercase__ = attentions
if self.add_upsample:
lowercase__ = FlaxUpsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self : List[str], __lowercase : str, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str], __lowercase : Any=True ):
for resnet, attn in zip(self.resnets, self.attentions ):
# pop res hidden states
lowercase__ = res_hidden_states_tuple[-1]
lowercase__ = res_hidden_states_tuple[:-1]
lowercase__ = jnp.concatenate((hidden_states, res_hidden_states), axis=-1 )
lowercase__ = resnet(__lowercase, __lowercase, deterministic=__lowercase )
lowercase__ = attn(__lowercase, __lowercase, deterministic=__lowercase )
if self.add_upsample:
lowercase__ = self.upsamplers_a(__lowercase )
return hidden_states
class _snake_case ( nn.Module):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : jnp.dtype =jnp.floataa
def A__ ( self : str ):
lowercase__ = []
for i in range(self.num_layers ):
lowercase__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase__ = self.prev_output_channel if i == 0 else self.out_channels
lowercase__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__lowercase )
lowercase__ = resnets
if self.add_upsample:
lowercase__ = FlaxUpsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self : Tuple, __lowercase : Optional[Any], __lowercase : Union[str, Any], __lowercase : Optional[int], __lowercase : Any=True ):
for resnet in self.resnets:
# pop res hidden states
lowercase__ = res_hidden_states_tuple[-1]
lowercase__ = res_hidden_states_tuple[:-1]
lowercase__ = jnp.concatenate((hidden_states, res_hidden_states), axis=-1 )
lowercase__ = resnet(__lowercase, __lowercase, deterministic=__lowercase )
if self.add_upsample:
lowercase__ = self.upsamplers_a(__lowercase )
return hidden_states
class _snake_case ( nn.Module):
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : jnp.dtype =jnp.floataa
def A__ ( self : List[Any] ):
# there is always at least one resnet
lowercase__ = [
FlaxResnetBlockaD(
in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
]
lowercase__ = []
for _ in range(self.num_layers ):
lowercase__ = FlaxTransformeraDModel(
in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(__lowercase )
lowercase__ = FlaxResnetBlockaD(
in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(__lowercase )
lowercase__ = resnets
lowercase__ = attentions
def __call__( self : Union[str, Any], __lowercase : Dict, __lowercase : List[str], __lowercase : Optional[Any], __lowercase : Union[str, Any]=True ):
lowercase__ = self.resnets[0](__lowercase, __lowercase )
for attn, resnet in zip(self.attentions, self.resnets[1:] ):
lowercase__ = attn(__lowercase, __lowercase, deterministic=__lowercase )
lowercase__ = resnet(__lowercase, __lowercase, deterministic=__lowercase )
return hidden_states
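# --- Added editor sketch -----------------------------------------------------
# The skip-connection pattern the up blocks above rely on, shown standalone:
# each step pops the most recent down-path activation and concatenates it to
# the running hidden states along the channel axis (Flax uses NHWC, so -1).
# Shapes below are illustrative assumptions.
def _pop_and_concat(hidden_states, res_hidden_states_tuple):
    res = res_hidden_states_tuple[-1]          # most recent down-path activation
    remaining = res_hidden_states_tuple[:-1]   # left for the next resnet
    return jnp.concatenate((hidden_states, res), axis=-1), remaining

_h = jnp.zeros((1, 8, 8, 4))
_merged, _rest = _pop_and_concat(_h, (jnp.zeros((1, 8, 8, 4)),))
assert _merged.shape == (1, 8, 8, 8) and _rest == ()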
| 359
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowercase_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
lowercase_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class _snake_case ( lowercase__):
UpperCamelCase__ : Optional[int] ="""whisper"""
UpperCamelCase__ : Optional[int] =["""past_key_values"""]
UpperCamelCase__ : Optional[Any] ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any], __lowercase : List[str]=5_1865, __lowercase : Dict=80, __lowercase : List[Any]=6, __lowercase : Union[str, Any]=4, __lowercase : Tuple=6, __lowercase : Dict=4, __lowercase : List[Any]=1536, __lowercase : Tuple=1536, __lowercase : Any=0.0, __lowercase : List[str]=0.0, __lowercase : List[Any]=5_0257, __lowercase : List[str]=True, __lowercase : str=True, __lowercase : int="gelu", __lowercase : Tuple=256, __lowercase : Tuple=0.0, __lowercase : List[Any]=0.0, __lowercase : Optional[int]=0.0, __lowercase : List[Any]=0.02, __lowercase : Union[str, Any]=False, __lowercase : str=1500, __lowercase : Optional[int]=448, __lowercase : Optional[Any]=5_0256, __lowercase : Tuple=5_0256, __lowercase : Any=5_0256, __lowercase : Union[str, Any]=None, __lowercase : Any=[220, 5_0256], __lowercase : List[Any]=False, __lowercase : int=256, __lowercase : int=False, __lowercase : Tuple=0.05, __lowercase : int=10, __lowercase : Dict=2, __lowercase : List[Any]=0.0, __lowercase : Optional[int]=10, __lowercase : Union[str, Any]=0, __lowercase : Tuple=7, **__lowercase : Union[str, Any], ):
lowercase__ = vocab_size
lowercase__ = num_mel_bins
lowercase__ = d_model
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_ffn_dim
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
lowercase__ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
lowercase__ = median_filter_width
super().__init__(
pad_token_id=__lowercase, bos_token_id=__lowercase, eos_token_id=__lowercase, is_encoder_decoder=__lowercase, decoder_start_token_id=__lowercase, suppress_tokens=__lowercase, begin_suppress_tokens=__lowercase, **__lowercase, )
class _snake_case ( lowercase__):
@property
def A__ ( self : str ):
lowercase__ = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
lowercase__ = {0: "batch"}
else:
lowercase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowercase, direction="inputs" )
return common_inputs
def A__ ( self : int, __lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], __lowercase : int = -1, __lowercase : int = -1, __lowercase : bool = False, __lowercase : Optional["TensorType"] = None, __lowercase : int = 2_2050, __lowercase : float = 5.0, __lowercase : int = 220, ):
lowercase__ = OrderedDict()
lowercase__ = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=__lowercase, framework=__lowercase, sampling_rate=__lowercase, time_duration=__lowercase, frequency=__lowercase, )
lowercase__ = encoder_inputs["input_features"].shape[2]
lowercase__ = encoder_sequence_length // 2 if self.use_past else seq_length
lowercase__ = super().generate_dummy_inputs(
preprocessor.tokenizer, __lowercase, __lowercase, __lowercase, __lowercase )
lowercase__ = encoder_inputs.pop("input_features" )
lowercase__ = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
lowercase__ = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def A__ ( self : int ):
return 1e-3
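# --- Added editor sketch -----------------------------------------------------
# What the large suppress-token lists above are used for at decode time: the
# corresponding logits are forced to -inf so those ids can never be sampled.
# Standalone illustration only, not Whisper's actual generation code.
import numpy as np

def _suppress(logits, suppress_ids):
    out = logits.copy()
    out[suppress_ids] = -np.inf
    return out

_masked = _suppress(np.zeros(16), [1, 2, 7])
assert np.isinf(_masked[[1, 2, 7]]).all() and _masked[0] == 0.0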
| 224
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __snake_case ( A__ , A__ , A__ , unittest.TestCase ):
lowerCAmelCase_ = StableUnCLIPImgaImgPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ = frozenset([] )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL()
SCREAMING_SNAKE_CASE__ = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __a ( self : Dict , _lowercase : int , _lowercase : Union[str, Any]=0 , _lowercase : Union[str, Any]=True ):
"""simple docstring"""
if str(__snake_case ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__snake_case )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
SCREAMING_SNAKE_CASE__ = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE__ = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline(**__snake_case )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__snake_case )
inputs.update({"""image_embeds""": None} )
SCREAMING_SNAKE_CASE__ = sd_pipe(**__snake_case ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __a ( self : Optional[Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(__snake_case , """anime turle""" , generator=__snake_case , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(__snake_case , """anime turle""" , generator=__snake_case , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = pipe(
__snake_case , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
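# --- Added editor note --------------------------------------------------------
# The tensor -> PIL path used in get_dummy_inputs above, spelled out: a tensor
# in [-1, 1] is mapped to [0, 1], clamped, converted to NHWC numpy, then fed
# to DiffusionPipeline.numpy_to_pil:
#   img = (t * 0.5 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).float().numpy()
#   pil = DiffusionPipeline.numpy_to_pil(img)[0]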
| 219
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case , cache_dir=__snake_case )
UpperCAmelCase : str = [t[-1] for t in os.walk(os.path.join(__snake_case , os.listdir(__snake_case )[0] , '''snapshots''' ) )]
UpperCAmelCase : str = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def A ( self : List[str] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__snake_case )
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Tuple = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[Any] = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(__snake_case , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Union[str, Any] = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : int = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Tuple = shard(__snake_case )
UpperCAmelCase : Tuple = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def A ( self : int ) -> Dict:
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case )
UpperCAmelCase : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : List[str] = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : List[Any] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : int ) -> Any:
UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
UpperCAmelCase : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[int] = jax.device_count()
UpperCAmelCase : List[str] = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : Tuple = replicate(__snake_case )
UpperCAmelCase : Any = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : str = shard(__snake_case )
UpperCAmelCase : Optional[int] = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def A ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : int = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__snake_case , safety_checker=__snake_case , )
UpperCAmelCase : Tuple = scheduler.create_state()
UpperCAmelCase : Dict = scheduler_state
UpperCAmelCase : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : int = jax.random.PRNGKey(0 )
UpperCAmelCase : Union[str, Any] = 50
UpperCAmelCase : Optional[Any] = jax.device_count()
UpperCAmelCase : Any = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
UpperCAmelCase : str = replicate(__snake_case )
UpperCAmelCase : List[str] = jax.random.split(__snake_case , __snake_case )
UpperCAmelCase : Optional[int] = shard(__snake_case )
UpperCAmelCase : Dict = pipeline(__snake_case , __snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(__snake_case , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def A ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : List[Any] = num_samples * [prompt]
UpperCAmelCase : str = jax.random.split(jax.random.PRNGKey(0 ) , __snake_case )
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , )
UpperCAmelCase : Dict = replicate(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[str] = shard(__snake_case )
UpperCAmelCase : Any = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : Optional[int] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase , UpperCAmelCase : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__snake_case , use_memory_efficient_attention=__snake_case , )
UpperCAmelCase : int = replicate(__snake_case )
UpperCAmelCase : int = pipeline.prepare_inputs(__snake_case )
UpperCAmelCase : List[Any] = shard(__snake_case )
UpperCAmelCase : Optional[Any] = pipeline(__snake_case , __snake_case , __snake_case , jit=__snake_case ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase : int = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
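# --- Added editor sketch -----------------------------------------------------
# The data layout behind the replicate/shard calls used throughout these
# tests: `shard` reshapes a (num_devices * batch, ...) array into
# (num_devices, batch, ...) so pmap (jit=True above) hands each device one
# slice. Toy shapes below are assumptions.
def _shard_like(x, num_devices):
    return x.reshape(num_devices, x.shape[0] // num_devices, *x.shape[1:])

_ids = np.zeros((8, 77), dtype=np.int32)
assert _shard_like(_ids, 8).shape == (8, 1, 77)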
| 23
| 0
|
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
UpperCamelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
UpperCamelCase = F'''down_blocks.{i}.resnets.{j}.'''
UpperCamelCase = F'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
UpperCamelCase = F'''down_blocks.{i}.attentions.{j}.'''
UpperCamelCase = F'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
UpperCamelCase = F'''up_blocks.{i}.resnets.{j}.'''
UpperCamelCase = F'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
UpperCamelCase = F'''up_blocks.{i}.attentions.{j}.'''
UpperCamelCase = F'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
UpperCamelCase = F'''down_blocks.{i}.downsamplers.0.conv.'''
UpperCamelCase = F'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
UpperCamelCase = F'''up_blocks.{i}.upsamplers.0.'''
UpperCamelCase = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
UpperCamelCase = """mid_block.attentions.0."""
UpperCamelCase = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
UpperCamelCase = F'''mid_block.resnets.{j}.'''
UpperCamelCase = F'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
A_ : Optional[Any] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
A_ : Tuple = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
A_ : Optional[int] = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
A_ : int = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Dict = v
A_ : Optional[int] = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
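# --- Added editor mini-demo ----------------------------------------------------
# The key-renaming technique above on a toy state dict: start from an identity
# mapping, rewrite substrings HF -> SD using a conversion table, then re-key.
# Toy keys only; the real tables are defined above.
_toy_map = [("in_layers.0", "norm1")]  # (sd, hf) pairs, as in the tables above
_toy_sd = {"down_blocks.0.resnets.0.norm1.weight": 1}
_m = {k: k for k in _toy_sd}
for _k in _m:
    for _sd, _hf in _toy_map:
        _m[_k] = _m[_k].replace(_hf, _sd)
_converted = {v: _toy_sd[k] for k, v in _m.items()}
assert "down_blocks.0.resnets.0.in_layers.0.weight" in _converted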
# ================#
# VAE Conversion #
# ================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
UpperCamelCase = F'''encoder.down_blocks.{i}.resnets.{j}.'''
UpperCamelCase = F'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
UpperCamelCase = F'''down_blocks.{i}.downsamplers.0.'''
UpperCamelCase = F'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
UpperCamelCase = F'''up_blocks.{i}.upsamplers.0.'''
UpperCamelCase = F'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
UpperCamelCase = F'''decoder.up_blocks.{i}.resnets.{j}.'''
UpperCamelCase = F'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
UpperCamelCase = F'''mid_block.resnets.{i}.'''
UpperCamelCase = F'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
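# --- Added editor demo ---------------------------------------------------------
# What the reshape above does (inlined because the function names in this file
# are obfuscated): an (out, in) linear weight becomes an (out, in, 1, 1) conv2d
# kernel, i.e. an equivalent 1x1 convolution.
_w = torch.ones(8, 4)
assert _w.reshape(*_w.shape, 1, 1).shape == (8, 4, 1, 1)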
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Tuple = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
A_ : List[str] = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Any = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
A_ : str = v.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = v
A_ : Optional[int] = {v: vae_state_dict[k] for k, v in mapping.items()}
A_ : Dict = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
A_ : Union[str, Any] = reshape_weight_for_sd(SCREAMING_SNAKE_CASE )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
UpperCamelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
UpperCamelCase = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
UpperCamelCase = {"""q""": 0, """k""": 1, """v""": 2}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[str] = {}
A_ : Optional[int] = {}
A_ : List[Any] = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
A_ : Dict = k[: -len('''.q_proj.weight''' )]
A_ : Dict = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
A_ : Dict = [None, None, None]
A_ : Tuple = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
A_ : Optional[Any] = k[: -len('''.q_proj.bias''' )]
A_ : int = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
A_ : int = [None, None, None]
A_ : Dict = v
continue
A_ : Optional[int] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
A_ : Tuple = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , SCREAMING_SNAKE_CASE )
A_ : Optional[int] = torch.cat(SCREAMING_SNAKE_CASE )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
A_ : List[str] = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , SCREAMING_SNAKE_CASE )
A_ : Tuple = torch.cat(SCREAMING_SNAKE_CASE )
return new_state_dict
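# --- Added editor demo ---------------------------------------------------------
# The q/k/v fusion performed above on a toy scale: three (d, d) projection
# weights are stacked into one (3d, d) in_proj weight, ordered q, k, v
# (see the code2idx mapping above).
_q, _k, _v = (torch.full((2, 2), float(i)) for i in range(3))
_fused = torch.cat([_q, _k, _v])
assert _fused.shape == (6, 2) and _fused[4, 0].item() == 2.0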
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return text_enc_dict
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
UpperCamelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
UpperCamelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
UpperCamelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
UpperCamelCase = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load each model from safetensors if it exists; otherwise fall back to the PyTorch .bin
if osp.exists(unet_path):
UpperCamelCase = load_file(unet_path, device="""cpu""")
else:
UpperCamelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
UpperCamelCase = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
UpperCamelCase = load_file(vae_path, device="""cpu""")
else:
UpperCamelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
UpperCamelCase = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
UpperCamelCase = load_file(text_enc_path, device="""cpu""")
else:
UpperCamelCase = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
UpperCamelCase = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
UpperCamelCase = convert_unet_state_dict(unet_state_dict)
UpperCamelCase = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
UpperCamelCase = convert_vae_state_dict(vae_state_dict)
UpperCamelCase = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# The easiest way to identify a v2.0 model seems to be that its text encoder (OpenCLIP) is deeper
UpperCamelCase = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
UpperCamelCase = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
UpperCamelCase = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
UpperCamelCase = convert_text_enc_state_dict(text_enc_dict)
UpperCamelCase = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
UpperCamelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
UpperCamelCase = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
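# --- Added editor usage note ----------------------------------------------------
# Hedged invocation example; the script file name and the paths below are
# assumptions, not taken from this file:
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors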
| 65
|
from manim import *
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : str = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Optional[Any] = Text('''CPU''' , font_size=24 )
A_ : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Dict = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.align_to(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = Text('''Model''' , font_size=24 )
A_ : List[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , )
A_ : int = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Any = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(_SCREAMING_SNAKE_CASE ) , Write(_SCREAMING_SNAKE_CASE ) )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Dict = []
A_ : int = []
A_ : Optional[Any] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(_SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A_ : Union[str, Any] = 0.4_6 / 4
A_ : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
| 65
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Tuple , __UpperCAmelCase : Any):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
a : Dict = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCAmelCase)
def __snake_case ( self : Optional[Any]):
a : Union[str, Any] = "sshleifer/tiny-gpt2"
a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : Union[str, Any] = PyTorchBenchmark(__UpperCAmelCase)
a : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Optional[int]):
a : List[str] = "sgugger/tiny-distilbert-classification"
a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
a : Optional[int] = PyTorchBenchmark(__UpperCAmelCase)
a : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Any):
a : List[str] = "sshleifer/tiny-gpt2"
a : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , torchscript=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
a : str = PyTorchBenchmark(__UpperCAmelCase)
a : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def __snake_case ( self : str):
a : Any = "sshleifer/tiny-gpt2"
a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , fpaa=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
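# A minimal, self-contained sketch of the benchmark API these tests exercise.
# Model name and argument values mirror the fixtures above; everything else is
# illustrative, not part of the test suite.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # tiny checkpoint keeps the run cheap
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()
# nested dicts keyed by model name, batch size and sequence length
print(results.time_inference_result)
print(results.memory_inference_result)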
| 40
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "efficientnet"
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5
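# Hedged usage sketch for the configuration above: instantiate with defaults and
# read back a derived field. The attribute name num_hidden_layers is assumed from
# the upstream config; the arithmetic follows __init__ directly:
# sum(num_block_repeats) * 4 == (1 + 2 + 2 + 3 + 3 + 4 + 1) * 4 == 64.
from transformers import EfficientNetConfig

config = EfficientNetConfig()
print(config.image_size)         # 600
print(config.num_hidden_layers)  # 64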
| 323
| 0
|
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    """simple docstring"""
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
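# Quick worked examples for the divisor-sum helper above: a number is "perfect"
# exactly when the sum of its proper divisors equals the number itself.
# sum_of_divisors(6)  == 1 + 2 + 3          == 6    -> perfect
# sum_of_divisors(28) == 1 + 2 + 4 + 7 + 14 == 28   -> perfect
# sum_of_divisors(12) == 1 + 2 + 3 + 4 + 6  == 16   -> abundant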
| 85
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        '''simple docstring'''
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        '''simple docstring'''
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length"
        )

    def decode(self, outputs):
        '''simple docstring'''
        logits = outputs.logits
        # column 2 of an MNLI-style head is the entailment score for each (text, hypothesis) pair
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
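# The same NLI trick written out by hand, as a sketch rather than the tool API:
# score each candidate label as a hypothesis and keep the one the model entails
# most. Text and labels below are made up for illustration.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

text = "The new GPU doubled our training throughput."
labels = ["sports", "technology", "cooking"]
inputs = tokenizer(
    [text] * len(labels),
    [f"This example is {label}" for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits
# for bart-large-mnli, column 2 holds the entailment logit
print(labels[torch.argmax(logits[:, 2]).item()])  # expected: "technology"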
| 85
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a__ = DebertaTokenizer
a__ = True
a__ = DebertaTokenizerFast
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
__magic_name__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__magic_name__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__magic_name__ = {"""unk_token""": """[UNK]"""}
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
def _lowercase ( self : List[Any] , **UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def _lowercase ( self : Dict , UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ = """lower newer"""
__magic_name__ = """lower newer"""
return input_text, output_text
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__magic_name__ = self.get_tokenizer()
__magic_name__ = """lower newer"""
__magic_name__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__magic_name__ = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = tokens + [tokenizer.unk_token]
__magic_name__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
    def test_token_type_ids(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__magic_name__ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__magic_name__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__magic_name__ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
__magic_name__ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ )
__magic_name__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__magic_name__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self : str ) -> int:
"""simple docstring"""
__magic_name__ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__magic_name__ = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__magic_name__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
__magic_name__ = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
__magic_name__ = [tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for seq in encoding["""input_ids"""]]
# fmt: off
__magic_name__ = {
"""input_ids""": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__magic_name__ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , UpperCamelCase_ )
for expected, decoded in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
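# A small round-trip sketch for the tokenizer under test (fetches the real vocab
# from the Hub, so it needs network access on first run):
from transformers import DebertaTokenizer

tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
ids = tokenizer.encode("lower newer", add_special_tokens=False)
print(tokenizer.decode(ids))  # "lower newer"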
| 88
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
__lowerCamelCase = pad_size
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ):
__lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ )
__lowerCamelCase = (old_height // size + 1) * size - old_height
__lowerCamelCase = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_pad if do_pad is not None else self.do_pad
__lowerCamelCase = pad_size if pad_size is not None else self.pad_size
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_pad:
__lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
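# Worked example of the pad-to-multiple arithmetic in the pad method above:
# (old // size + 1) * size - old pads each side up to the next multiple of size,
# and note it adds a full extra block when the side is already divisible.
size = 8
for old in (250, 256):
    pad_amount = (old // size + 1) * size - old
    print(old, "->", old + pad_amount)  # 250 -> 256 (pad 6), 256 -> 264 (pad 8)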
| 12
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
A : List[Any] = logging.get_logger(__name__)
def shape_list(tensor):
    '''simple docstring'''
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=_UpperCamelCase , name=_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-5 , _UpperCamelCase=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
__lowerCAmelCase = tf.nn.moments(_UpperCamelCase , axes=[axis] , keepdims=_UpperCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__lowerCAmelCase = [1] * inputs.shape.rank
__lowerCAmelCase = shape_list(_UpperCamelCase )[axis]
__lowerCAmelCase = tf.reshape(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = tf.reshape(_UpperCamelCase , _UpperCamelCase )
# Compute layer normalization using the batch_normalization
# function.
__lowerCAmelCase = tf.nn.batch_normalization(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , offset=_UpperCamelCase , scale=_UpperCamelCase , variance_epsilon=_UpperCamelCase , )
return outputs
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=0 , _UpperCamelCase=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__lowerCAmelCase = tf.shape(_UpperCamelCase )
__lowerCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__lowerCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , tf.Tensor ):
__lowerCAmelCase = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__lowerCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__lowerCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
_UpperCamelCase , tf.cast(_UpperCamelCase , dtype=tensor.dtype ) , message=(
f"The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding "
f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__lowerCAmelCase = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
f"bytes: {bad_attributes}" )
__lowerCAmelCase = np.asarray(_UpperCamelCase )
__lowerCAmelCase = 1
__lowerCAmelCase = np.array_split(_UpperCamelCase , _UpperCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__lowerCAmelCase = np.array_split(_UpperCamelCase , _UpperCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_UpperCamelCase ):
__lowerCAmelCase = chunk_data
else:
__lowerCAmelCase = data
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if name in group.attrs:
__lowerCAmelCase = [n.decode("utf8" ) if hasattr(_UpperCamelCase , "decode" ) else n for n in group.attrs[name]]
else:
__lowerCAmelCase = []
__lowerCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(_UpperCamelCase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
def _expand_single_ad_tensor(_UpperCamelCase ):
if isinstance(_UpperCamelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_UpperCamelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _UpperCamelCase )
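# Sanity-check sketch for the flatten helper above, re-stated inline since the
# helper names in this file are obfuscated: flattening dims 1..2 of a (2, 3, 4)
# tensor should give (2, 12).
import tensorflow as tf

x = tf.zeros((2, 3, 4))
in_shape = tf.shape(x)
flattened_dim = tf.math.reduce_prod(in_shape[1:3])
out = tf.reshape(x, tf.concat([in_shape[:1], [flattened_dim]], axis=0))
print(out.shape)  # (2, 12)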
| 357
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
A : Union[str, Any] = "https://openaipublic.azureedge.net/jukebox/models/"
A : Tuple = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
__lowerCAmelCase = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
__lowerCAmelCase = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
__lowerCAmelCase = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
__lowerCAmelCase = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
__lowerCAmelCase = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
__lowerCAmelCase = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowerCAmelCase = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
__lowerCAmelCase = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {}
import re
__lowerCAmelCase = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
__lowerCAmelCase = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
__lowerCAmelCase = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
__lowerCAmelCase = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
__lowerCAmelCase = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
__lowerCAmelCase = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
__lowerCAmelCase = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
__lowerCAmelCase = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
__lowerCAmelCase = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_encoder_block_conv_in.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
__lowerCAmelCase = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
__lowerCAmelCase = re_encoder_block_conv_in.sub(_UpperCamelCase , _UpperCamelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_encoder_block_resnet.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
__lowerCAmelCase = {"1": 1, "3": 2}[groups[-2]]
__lowerCAmelCase = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
__lowerCAmelCase = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowerCAmelCase = prefix + resnet_block
__lowerCAmelCase = re_encoder_block_resnet.sub(_UpperCamelCase , _UpperCamelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_encoder_block_proj_out.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
__lowerCAmelCase = re_encoder_block_proj_out.sub(_UpperCamelCase , _UpperCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_decoder_block_conv_out.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCAmelCase = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
__lowerCAmelCase = re_decoder_block_conv_out.sub(_UpperCamelCase , _UpperCamelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_decoder_block_resnet.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCAmelCase = {"1": 1, "3": 2}[groups[-2]]
__lowerCAmelCase = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
__lowerCAmelCase = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowerCAmelCase = prefix + resnet_block
__lowerCAmelCase = re_decoder_block_resnet.sub(_UpperCamelCase , _UpperCamelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_decoder_block_proj_in.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
__lowerCAmelCase = re_decoder_block_proj_in.sub(_UpperCamelCase , _UpperCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_prior_cond_conv_out.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCAmelCase = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
__lowerCAmelCase = re_prior_cond_conv_out.sub(_UpperCamelCase , _UpperCamelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_prior_cond_resnet.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCAmelCase = {"1": 1, "3": 2}[groups[-2]]
__lowerCAmelCase = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
__lowerCAmelCase = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowerCAmelCase = prefix + resnet_block
__lowerCAmelCase = re_prior_cond_resnet.sub(_UpperCamelCase , _UpperCamelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCamelCase ):
__lowerCAmelCase = re_prior_cond_proj_in.match(_UpperCamelCase )
__lowerCAmelCase = regex_match.groups()
__lowerCAmelCase = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
__lowerCAmelCase = re_prior_cond_proj_in.sub(_UpperCamelCase , _UpperCamelCase )
# keep original key
else:
__lowerCAmelCase = original_key
__lowerCAmelCase = replace_key(_UpperCamelCase )
if f"{key_prefix}.{key}" not in model_state_dict or key is None:
print(f"failed converting {original_key} to {key}, does not match" )
# handle mismatched shape
elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
__lowerCAmelCase = model_state_dict[f"{key_prefix}.{key}"]
print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
__lowerCAmelCase = original_key
__lowerCAmelCase = original_key
__lowerCAmelCase = value
return new_dict
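# Worked example of the encoder conv-in renaming above (standalone sketch):
import re

pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
groups = pattern.fullmatch("encoders.0.level_blocks.1.model.2.1.weight").groups()
block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 1 == 5
print(f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}")
# -> encoders.0.level_blocks.1.downsample_block.5.weight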
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase=None , _UpperCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
__lowerCAmelCase = requests.get(f"{PREFIX}{file}" , allow_redirects=_UpperCamelCase )
os.makedirs(f"{pytorch_dump_folder_path}/" , exist_ok=_UpperCamelCase )
open(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
__lowerCAmelCase = MODEL_MAPPING[model_name.split("/" )[-1]]
__lowerCAmelCase = JukeboxConfig.from_pretrained(_UpperCamelCase )
__lowerCAmelCase = JukeboxModel(_UpperCamelCase )
__lowerCAmelCase = []
__lowerCAmelCase = {}
for i, dict_name in enumerate(_UpperCamelCase ):
__lowerCAmelCase = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
__lowerCAmelCase = {}
for k in old_dic.keys():
if k.endswith(".b" ):
__lowerCAmelCase = old_dic[k]
elif k.endswith(".w" ):
__lowerCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowerCAmelCase = old_dic[k]
else:
__lowerCAmelCase = old_dic[k]
__lowerCAmelCase = "vqvae" if i == 0 else f"priors.{3 - i}"
__lowerCAmelCase = fix_jukebox_keys(_UpperCamelCase , model.state_dict() , _UpperCamelCase , _UpperCamelCase )
weight_dict.append(_UpperCamelCase )
__lowerCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
with open(f"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_UpperCamelCase , _UpperCamelCase )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
return weight_dict
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
A : Union[str, Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 259
| 0
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        '''simple docstring'''
        return self.__str__()

    def derivative(self) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        '''simple docstring'''
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_2)
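# Usage sketch for the class above: coefficients are ordered from the x^0 term
# upwards, so [1, 2, 3] with degree 2 represents 3x^2 + 2x + 1.
p = Polynomial(2, [1, 2, 3])
print(p)                       # 3x^2 + 2x + 1
print(p.evaluate(2))           # 3*4 + 2*2 + 1 == 17
print(p.derivative())          # 6x + 2
print(p.integral(constant=0))  # 1.0x^3 + 1.0x^2 + 1.0x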
| 300
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """simple docstring"""
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]', str_)]


def to_simple_case(str_: str) -> str:
    """simple docstring"""
    string_split = split_input(str_)
    return "".join(
        [''.join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """simple docstring"""
    try:
        string_split = split_input(text)
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """simple docstring"""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """simple docstring"""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """simple docstring"""
    return to_complex_case(text, upper, '_')


def to_kebab_case(text: str, upper: bool) -> str:
    """simple docstring"""
    return to_complex_case(text, upper, '-')
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 224
| 0
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
__snake_case : List[str] =re.compile(R'\b(a|an|the)\b', re.UNICODE)
__snake_case : Optional[Any] =None
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
parser.add_argument('''data_file''' ,metavar='''data.json''' ,help='''Input data JSON file.''')
parser.add_argument('''pred_file''' ,metavar='''pred.json''' ,help='''Model predictions.''')
parser.add_argument(
'''--out-file''' ,'''-o''' ,metavar='''eval.json''' ,help='''Write accuracy metrics to file (default is stdout).''')
parser.add_argument(
'''--na-prob-file''' ,'''-n''' ,metavar='''na_prob.json''' ,help='''Model estimates of probability of no answer.''')
parser.add_argument(
'''--na-prob-thresh''' ,'''-t''' ,type=lowerCamelCase_ ,default=1.0 ,help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' ,)
parser.add_argument(
'''--out-image-dir''' ,'''-p''' ,metavar='''out_images''' ,default=lowerCamelCase_ ,help='''Save precision-recall curves to directory.''')
parser.add_argument('''--verbose''' ,'''-v''' ,action='''store_true''')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase__ : int = bool(qa['''answers''']['''text'''])
return qid_to_has_ans
def lowerCAmelCase__ ( lowerCamelCase_ : Optional[Any]):
'''simple docstring'''
def remove_articles(lowerCamelCase_ : Tuple):
return ARTICLES_REGEX.sub(''' ''' ,lowerCamelCase_)
def white_space_fix(lowerCamelCase_ : Optional[int]):
return " ".join(text.split())
def remove_punc(lowerCamelCase_ : str):
lowerCAmelCase__ : Any = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase_ : Dict):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_))))
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any]):
'''simple docstring'''
if not s:
return []
return normalize_answer(lowerCamelCase_).split()
def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : List[Any]):
'''simple docstring'''
return int(normalize_answer(lowerCamelCase_) == normalize_answer(lowerCamelCase_))
def lowerCAmelCase__ ( lowerCamelCase_ : Tuple ,lowerCamelCase_ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ : List[str] = get_tokens(lowerCamelCase_)
lowerCAmelCase__ : Optional[Any] = get_tokens(lowerCamelCase_)
lowerCAmelCase__ : Dict = collections.Counter(lowerCamelCase_) & collections.Counter(lowerCamelCase_)
lowerCAmelCase__ : Dict = sum(common.values())
if len(lowerCamelCase_) == 0 or len(lowerCamelCase_) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCAmelCase__ : List[str] = 1.0 * num_same / len(lowerCamelCase_)
lowerCAmelCase__ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_)
lowerCAmelCase__ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : Tuple):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase__ : Dict = qa['''id''']
lowerCAmelCase__ : str = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowerCamelCase_)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase__ : Dict = ['''''']
if qid not in preds:
print(f"""Missing prediction for {qid}""")
continue
lowerCAmelCase__ : List[Any] = preds[qid]
# Take max over all gold answers
lowerCAmelCase__ : Optional[int] = max(compute_exact(lowerCamelCase_ ,lowerCamelCase_) for a in gold_answers)
lowerCAmelCase__ : Any = max(compute_fa(lowerCamelCase_ ,lowerCamelCase_) for a in gold_answers)
return exact_scores, fa_scores
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : str ,lowerCamelCase_ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = {}
for qid, s in scores.items():
lowerCAmelCase__ : List[str] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase__ : List[Any] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase__ : Optional[Any] = s
return new_scores
def lowerCAmelCase__ ( lowerCamelCase_ : int ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : Tuple=None):
'''simple docstring'''
if not qid_list:
lowerCAmelCase__ : Union[str, Any] = len(lowerCamelCase_)
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values()) / total),
('''f1''', 100.0 * sum(fa_scores.values()) / total),
('''total''', total),
])
else:
lowerCAmelCase__ : Optional[Any] = len(lowerCamelCase_)
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list) / total),
('''total''', total),
])
def lowerCAmelCase__ ( lowerCamelCase_ : List[Any] ,lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : int):
'''simple docstring'''
for k in new_eval:
lowerCAmelCase__ : Tuple = new_eval[k]
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : Any ,lowerCamelCase_ : Optional[int]):
'''simple docstring'''
plt.step(lowerCamelCase_ ,lowerCamelCase_ ,color='''b''' ,alpha=0.2 ,where='''post''')
plt.fill_between(lowerCamelCase_ ,lowerCamelCase_ ,step='''post''' ,alpha=0.2 ,color='''b''')
plt.xlabel('''Recall''')
plt.ylabel('''Precision''')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(lowerCamelCase_)
plt.savefig(lowerCamelCase_)
plt.clf()
def lowerCAmelCase__ ( lowerCamelCase_ : Any ,lowerCamelCase_ : Dict ,lowerCamelCase_ : List[str] ,lowerCamelCase_ : Tuple ,lowerCamelCase_ : Tuple=None ,lowerCamelCase_ : Optional[int]=None):
'''simple docstring'''
lowerCAmelCase__ : str = sorted(lowerCamelCase_ ,key=lambda lowerCamelCase_: na_probs[k])
lowerCAmelCase__ : List[str] = 0.0
lowerCAmelCase__ : Optional[Any] = 1.0
lowerCAmelCase__ : Optional[Any] = 0.0
lowerCAmelCase__ : Optional[Any] = [1.0]
lowerCAmelCase__ : Tuple = [0.0]
lowerCAmelCase__ : List[str] = 0.0
for i, qid in enumerate(lowerCamelCase_):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase__ : List[Any] = true_pos / float(i + 1)
lowerCAmelCase__ : Any = true_pos / float(lowerCamelCase_)
if i == len(lowerCamelCase_) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCamelCase_)
recalls.append(lowerCamelCase_)
if out_image:
plot_pr_curve(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
return {"ap": 100.0 * avg_prec}
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : int ,lowerCamelCase_ : str ,lowerCamelCase_ : Tuple ,lowerCamelCase_ : Any ,lowerCamelCase_ : List[str]):
'''simple docstring'''
if out_image_dir and not os.path.exists(lowerCamelCase_):
os.makedirs(lowerCamelCase_)
lowerCAmelCase__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase__ : Optional[Any] = make_precision_recall_eval(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,out_image=os.path.join(lowerCamelCase_ ,'''pr_exact.png''') ,title='''Precision-Recall curve for Exact Match score''' ,)
lowerCAmelCase__ : Any = make_precision_recall_eval(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,out_image=os.path.join(lowerCamelCase_ ,'''pr_f1.png''') ,title='''Precision-Recall curve for F1 score''' ,)
lowerCAmelCase__ : Optional[Any] = {k: float(lowerCamelCase_) for k, v in qid_to_has_ans.items()}
lowerCAmelCase__ : List[Any] = make_precision_recall_eval(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,out_image=os.path.join(lowerCamelCase_ ,'''pr_oracle.png''') ,title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' ,)
merge_eval(lowerCamelCase_ ,lowerCamelCase_ ,'''pr_exact''')
merge_eval(lowerCamelCase_ ,lowerCamelCase_ ,'''pr_f1''')
merge_eval(lowerCamelCase_ ,lowerCamelCase_ ,'''pr_oracle''')
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Dict ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : Optional[int]):
'''simple docstring'''
if not qid_list:
return
lowerCAmelCase__ : List[str] = [na_probs[k] for k in qid_list]
lowerCAmelCase__ : Optional[Any] = np.ones_like(lowerCamelCase_) / float(len(lowerCamelCase_))
plt.hist(lowerCamelCase_ ,weights=lowerCamelCase_ ,bins=20 ,range=(0.0, 1.0))
plt.xlabel('''Model probability of no-answer''')
plt.ylabel('''Proportion of dataset''')
plt.title(f"""Histogram of no-answer probability: {name}""")
plt.savefig(os.path.join(lowerCamelCase_ ,f"""na_prob_hist_{name}.png"""))
plt.clf()
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Optional[int] ,lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ : List[str] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase__ : int = num_no_ans
lowerCAmelCase__ : str = cur_score
lowerCAmelCase__ : Tuple = 0.0
lowerCAmelCase__ : Tuple = sorted(lowerCamelCase_ ,key=lambda lowerCamelCase_: na_probs[k])
for i, qid in enumerate(lowerCamelCase_):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase__ : Any = scores[qid]
else:
if preds[qid]:
lowerCAmelCase__ : str = -1
else:
lowerCAmelCase__ : int = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase__ : Any = cur_score
lowerCAmelCase__ : Any = na_probs[qid]
return 100.0 * best_score / len(lowerCamelCase_), best_thresh
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : List[str] ,lowerCamelCase_ : List[str] ,lowerCamelCase_ : Dict ,lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = find_best_thresh(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = find_best_thresh(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ : Any = best_exact
lowerCAmelCase__ : str = exact_thresh
lowerCAmelCase__ : Dict = best_fa
lowerCAmelCase__ : Optional[Any] = fa_thresh
def lowerCAmelCase__ ( ):
'''simple docstring'''
with open(OPTS.data_file) as f:
lowerCAmelCase__ : List[Any] = json.load(lowerCamelCase_)
lowerCAmelCase__ : int = dataset_json['''data''']
with open(OPTS.pred_file) as f:
lowerCAmelCase__ : Any = json.load(lowerCamelCase_)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase__ : List[Any] = json.load(lowerCamelCase_)
else:
lowerCAmelCase__ : str = {k: 0.0 for k in preds}
lowerCAmelCase__ : Union[str, Any] = make_qid_to_has_ans(lowerCamelCase_) # maps qid to True/False
lowerCAmelCase__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase__ : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_raw_scores(lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ : Dict = apply_no_ans_threshold(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,OPTS.na_prob_thresh)
lowerCAmelCase__ : Tuple = apply_no_ans_threshold(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,OPTS.na_prob_thresh)
lowerCAmelCase__ : Any = make_eval_dict(lowerCamelCase_ ,lowerCamelCase_)
if has_ans_qids:
lowerCAmelCase__ : Optional[int] = make_eval_dict(lowerCamelCase_ ,lowerCamelCase_ ,qid_list=lowerCamelCase_)
merge_eval(lowerCamelCase_ ,lowerCamelCase_ ,'''HasAns''')
if no_ans_qids:
lowerCAmelCase__ : List[Any] = make_eval_dict(lowerCamelCase_ ,lowerCamelCase_ ,qid_list=lowerCamelCase_)
merge_eval(lowerCamelCase_ ,lowerCamelCase_ ,'''NoAns''')
if OPTS.na_prob_file:
find_all_best_thresh(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,OPTS.out_image_dir)
histogram_na_prob(lowerCamelCase_ ,lowerCamelCase_ ,OPTS.out_image_dir ,'''hasAns''')
histogram_na_prob(lowerCamelCase_ ,lowerCamelCase_ ,OPTS.out_image_dir ,'''noAns''')
if OPTS.out_file:
with open(OPTS.out_file ,'''w''') as f:
json.dump(lowerCamelCase_ ,lowerCamelCase_)
else:
print(json.dumps(lowerCamelCase_ ,indent=2))
if __name__ == "__main__":
__snake_case : List[str] =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
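# Worked example of the token-overlap F1 computed above: gold "the cat sat" and
# prediction "the cat" share two tokens, so precision = 2/2, recall = 2/3 and
# F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.
import collections

gold, pred = "the cat sat".split(), "the cat".split()
common = collections.Counter(gold) & collections.Counter(pred)
num_same = sum(common.values())
precision, recall = num_same / len(pred), num_same / len(gold)
print(2 * precision * recall / (precision + recall))  # 0.8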
| 94
|
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
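# Worked check for the search above: the first Fibonacci number with three digits
# is 144 (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144), which sits at index 12,
# so solution(3) == 12.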
| 94
| 1
|
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main() -> None:
    '''simple docstring'''
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"""\n{mode.title()}ed message:""")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    '''simple docstring'''
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    '''simple docstring'''
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    '''simple docstring'''
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
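# Classic worked example (note the key comes first in this file's signatures):
# encrypt_message("LEMON", "ATTACKATDAWN") -> "LXFOPVEFRNHR"
# decrypt_message("LEMON", "LXFOPVEFRNHR") -> "ATTACKATDAWN"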
| 65
|
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    '''simple docstring'''
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
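# Worked detail for the training step above: sigmoid_function(x, True) expects the
# *activation* (a sigmoid output), not the pre-activation, because
# d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)). For an activation of 0.5 the
# derivative term is 0.5 * (1 - 0.5) = 0.25.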
| 65
| 1
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_) -> None:
        """simple docstring"""
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other) -> bool:
        """simple docstring"""
        return self.key < other.key

    def __repr__(self) -> str:
        """simple docstring"""
        return self.id

    def add_neighbor(self, vertex) -> None:
        """simple docstring"""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """simple docstring"""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
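    # Added usage sketch (not in the original): a small triangle graph; both
    # implementations should report the same minimum spanning tree edges.
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 4)
    print(prim(graph, graph[0]))             # -> [(2, 1), (3, 2)]
    print(list(prim_heap(graph, graph[0])))  # -> [(2, 1), (3, 2)]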
| 361
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183
| 0
|
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 244
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ) -> None:
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 84
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/vit-base-patch16-224')
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 186
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 186
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5x^2 + 9.3x^3 + 7x^4
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
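    # Added check (not in the original): both evaluators agree (~79800.0 here);
    # Horner's rule needs only n multiplications versus O(n^2) for naive powering.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6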
| 44
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : List[Any] =['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = size if size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase :Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCamelCase :Dict = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
UpperCamelCase :Optional[int] = do_resize
UpperCamelCase :int = do_rescale
UpperCamelCase :Tuple = do_normalize
UpperCamelCase :str = do_center_crop
UpperCamelCase :int = crop_size
UpperCamelCase :Tuple = size
UpperCamelCase :List[str] = resample
UpperCamelCase :Tuple = rescale_factor
UpperCamelCase :Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase :Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
UpperCamelCase :Dict = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "shortest_edge" in size:
UpperCamelCase :str = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCamelCase :Optional[int] = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
UpperCamelCase :Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ ) -> np.ndarray:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ) -> BatchFeature:
UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase :Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase :Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase :Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase :Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase :Dict = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' , default_to_square=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = resample if resample is not None else self.resample
UpperCamelCase :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase :Optional[Any] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase :Dict = image_std if image_std is not None else self.image_std
UpperCamelCase :Dict = size if size is not None else self.size
UpperCamelCase :Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if not is_batched(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :str = [images]
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase :Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase :List[Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
UpperCamelCase :Tuple = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase :Union[str, Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase :Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase :List[str] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase :int = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 259
| 0
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 362
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
SCREAMING_SNAKE_CASE_ = "x = y"
SCREAMING_SNAKE_CASE_ = {"y": 5}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 5, "y": 5} )
def __A ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "y = add_two(x)"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result is None
assert "tried to execute add_two" in out.out
def __A ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE_ = "x = 3"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3} )
def __A ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "x = 3\ny = 5"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 5} )
def __A ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = "text = f'This is x: {x}.'"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__magic_name__ , {"x": 3, "text": "This is x: 3."} )
def __A ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 2} )
SCREAMING_SNAKE_CASE_ = {"x": 8}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 8, "y": 5} )
def __A ( self : str ) -> str:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
self.assertListEqual(__magic_name__ , [3, 5] )
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
def __A ( self : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "y = x"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {} , state=__magic_name__ )
assert result == 3
self.assertDictEqual(__magic_name__ , {"x": 3, "y": 3} )
def __A ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = "test_list = [x, add_two(x)]\ntest_list[1]"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_list": [3, 5]} )
SCREAMING_SNAKE_CASE_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
SCREAMING_SNAKE_CASE_ = {"x": 3}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"add_two": add_two} , state=__magic_name__ )
assert result == 5
self.assertDictEqual(__magic_name__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = "x = 0\nfor i in range(3):\n x = i"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = evaluate(__magic_name__ , {"range": range} , state=__magic_name__ )
assert result == 2
self.assertDictEqual(__magic_name__ , {"x": 2, "i": 2} )
| 305
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
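
# Added smoke test (not in the original script): reload the tiny checkpoint
# from disk and confirm it round-trips.
reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print("reloaded params:", reloaded.num_parameters())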
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 94
|
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """simple docstring"""
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be the same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
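    # Added worked example (not in the original): profits [10, 20, 30], weights
    # [1, 2, 3], capacity 5. All profit/weight ratios are equal, so the first
    # two items are taken fully (gain 30) plus 2 kg of the third (gain ~20),
    # for a total of ~50.
    assert abs(calc_profit([10, 20, 30], [1, 2, 3], 5) - 50) < 1e-9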
| 94
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 360
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
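

# Added sketch (not part of the diffusers source): drive the inverse scheduler
# standalone with a dummy zero-noise "model" to show the step/timestep plumbing.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.zeros(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for a UNet call
        sample = scheduler.step(noise_pred, int(t), sample).prev_sample
    print(sample.abs().sum())  # stays zero for a zero-noise model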
| 158
| 0
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("""* [{title}]({url})""".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 192
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
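A usage sketch (not part of the original file). `PipelineTool` instances are callable and lazily
load their checkpoint on first use; the image path below is a hypothetical placeholder.

from PIL import Image

tool = ImageCaptioningTool()
image = Image.open("cats.png")  # hypothetical local image
print(tool(image))              # e.g. "two cats sleeping on a couch"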
| 183
| 0
|
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
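The constants used in the demo below (a = 1664525, c = 1013904223, m = 2**32) are the well-known
Numerical Recipes LCG parameters. A quick deterministic check (not in the original): with seed 0,
the first draw is simply the increment.

_check = LinearCongruentialGenerator(1664525, 1013904223, 2**32, seed=0)
assert _check.next_number() == 1013904223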
if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 224
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
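A short usage sketch (not in the original file): instantiating the config with defaults, or with a
non-zero projection dimension for a reduced-dimension encoder output.

config = DPRConfig(projection_dim=128)
print(config.hidden_size, config.projection_dim)  # 768 128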
| 224
| 1
|
def binomial_coefficient(n, r):
    # Pascal's-triangle row computed in O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
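A quick illustrative check (not in the original): C(10, 5) = 252.

assert binomial_coefficient(n=10, r=5) == 252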
| 186
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186
| 1
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 107
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of a user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 107
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, list) and not isinstance(text[0], list)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, list) and isinstance(text[0], list):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 275
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 305
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
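A usage sketch (not in the original file). Loading a pretrained unconditional checkpoint gives the
pipeline a trained UNet; `google/ddpm-cifar10-32` is one commonly used example repo.

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save("sample.png")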
| 370
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 105
| 0
|
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Check whether some subset of `arr` sums to `required_sum`."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
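A quick illustrative check (not in the original): 9 = 4 + 5 is reachable, while 30 is not.

assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)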
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 98
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function as the improper integral of x**(z-1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
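An illustrative check (not in the original): the integral reproduces the factorial, gamma(5) = 4! = 24.

assert abs(gamma(5) - 24.0) < 1e-6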
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 158
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is used during self-supervised pre-training in MSN
    # and is not needed for downstream tasks
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 359
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 312
| 0
|
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Take a positive integer value and return its binary equivalent."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Take an integer value (as a string), validate it, and return its binary
    representation with a "0b" prefix ("-0b" for negative inputs)."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
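Illustrative checks (not in the original):

assert main("7") == "0b111"
assert main("-11") == "-0b1011"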
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 224
|
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir: str) -> dict:
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 224
| 1
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 369
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :torch.FloatTensor
lowerCamelCase :torch.FloatTensor
class a ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = 1
@register_to_config
def __init__( self , lowerCAmelCase_ = 20_00 , lowerCAmelCase_ = 0.15 , lowerCAmelCase_ = 0.01 , lowerCAmelCase_ = 1348.0 , lowerCAmelCase_ = 1E-5 , lowerCAmelCase_ = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
_A = sigma_max
# setable values
_A = None
self.set_sigmas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Tuple:
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_A = torch.linspace(1 , lowerCAmelCase_ , lowerCAmelCase_ , device=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Any:
_A = sigma_min if sigma_min is not None else self.config.sigma_min
_A = sigma_max if sigma_max is not None else self.config.sigma_max
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ )
_A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_A = torch.exp(torch.linspace(math.log(lowerCAmelCase_ ) , math.log(lowerCAmelCase_ ) , lowerCAmelCase_ ) )
_A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
_A = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_A = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_A = timesteps.to(self.discrete_sigmas.device )
_A = self.discrete_sigmas[timesteps].to(sample.device )
_A = self.get_adjacent_sigma(lowerCAmelCase_ , lowerCAmelCase_ ).to(sample.device )
_A = torch.zeros_like(lowerCAmelCase_ )
_A = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_A = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_A = diffusion.unsqueeze(-1 )
_A = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_A = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase_ , device=sample.device , dtype=sample.dtype )
_A = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_A = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase_ , prev_sample_mean=lowerCAmelCase_ )
    def step_correct( self , model_output , sample , generator = None , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        """Correct the predicted sample based on the score (the SDE corrector step)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__( self ) -> int:
        return self.config.num_train_timesteps
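# A minimal, hypothetical sampling loop for this scheduler (`model`, `sample` and
# `num_inference_steps` are illustrative names, not defined in this file):
#
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     for t in scheduler.timesteps:
#         sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#         sample = scheduler.step_pred(model(sample, t).sample, t, sample).prev_sample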
| 81
| 0
|
def join( separator : str, separated : list[str] ):
    """Concatenate the strings in `separated` with `separator`, dropping the trailing separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str ):
            raise Exception("join() accepts only strings to be joined" )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
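# Examples: join("-", ["a", "b", "c"]) returns "a-b-c";
# join(" ", ["You", "are", "amazing!"]) returns "You are amazing!".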
| 107
|
def remove_duplicates( key : str ):
    """Keep only the first occurrence of each alphabetic character in `key` (spaces pass through)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key : str ):
    """Build the substitution alphabet for a mixed keyword cipher."""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ), 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message : str, cipher_map : dict[str, str] ):
    """Encipher `message` with the given cipher map; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch ) for ch in message.upper() )
def decipher( message : str, cipher_map : dict[str, str] ):
    """Decipher `message` by inverting the cipher map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch ) for ch in message.upper() )
def main( ):
    """Interactive entry point."""
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message, cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
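# Illustrative round trip (key and message chosen here, not part of the original file):
#     cm = create_cipher_map("Goodbye!!")
#     ct = encipher("Hello World!!", cm)
#     decipher(ct, cm)  # -> "HELLO WORLD!!"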
| 107
| 1
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor( ProcessorMixin ):
    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        """Wrap a SpeechT5 feature extractor and tokenizer behind one processor."""
        super().__init__(feature_extractor , tokenizer)
    def __call__( self , *args , **kwargs ):
        """Dispatch inputs/targets to the feature extractor or tokenizer as appropriate."""
        audio = kwargs.pop('audio' , None)
        text = kwargs.pop('text' , None)
        text_target = kwargs.pop('text_target' , None)
        audio_target = kwargs.pop('audio_target' , None)
        sampling_rate = kwargs.pop('sampling_rate' , None)
        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs)
            labels = targets['input_ids']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs ):
        """Pad input values/ids and spectrogram or token labels to a common length."""
        input_values = kwargs.pop('input_values' , None)
        input_ids = kwargs.pop('input_ids' , None)
        labels = kwargs.pop('labels' , None)
        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs)
                labels = targets['input_ids']
            else:
                # temporarily swap in the number of mel bins so the feature extractor pads spectrogram labels
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs)
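# Illustrative use (the checkpoint name is an example, not fixed by this file):
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")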
| 290
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset( ) -> tuple[list[int], int]:
    """Generate a random array of ten integers and a random target sum."""
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr : list[int] , target : int ) -> tuple[int, ...]:
    """Brute-force search over all 3-permutations; O(n^3)."""
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr : list[int] , target : int ) -> tuple[int, int, int]:
    """Sort once, then use a two-pointer scan; O(n^2)."""
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
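# A quick sanity check (values chosen for illustration, not from the original file):
# both triplet_sum1([13, 29, 7, 23, 5], 35) and triplet_sum2([13, 29, 7, 23, 5], 35)
# return (5, 7, 23), since 5 + 7 + 23 == 35.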
def solution_times( ) -> tuple[float, float]:
    """Time both implementations on the shared random dataset."""
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 290
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export( model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    output_path.parent.mkdir(parents=True, exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models( model_path, output_path, opset, fp16 = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        device = "cpu"
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25 ).to(device=device, dtype=dtype ),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print('SD: Done: ONNX')
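# Example invocation (script name and paths are illustrative):
#     python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-4 \
#         --output_path ./sd_onnx --opset 14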
| 61
|
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array( array : list[int] , k : int ) -> int:
    """Maximum sum of k consecutive elements, computed with a sliding window."""
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
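# Example: max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], k=4) == 24
# (the best window of four consecutive elements is [3, 1, 0, 20]).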
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 105
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """Tokenize text and/or preprocess images into a single batch encoding."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 219
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 16
__snake_case = 32
def get_dataloaders( accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ):
    """Build tokenized GLUE/MRPC train and validation dataloaders."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def evaluation_loop( accelerator , model , eval_dataloader , metric ):
    """Evaluate the model and return its accuracy on the validation set."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions , references = accelerator.gather(
            (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function( config , args ):
    """Train on MRPC, checkpointing every epoch; on resume, verify the restored state."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load('glue' , 'mrpc' )
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        epoch_string = args.resume_from_checkpoint.split('epoch_' )[1]
        state_epoch_num = ''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num ) + 1
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        accelerator.print('resumed checkpoint performance:' , accuracy )
        accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
        with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , 'r' ) as f:
            resumed_state = json.load(f )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch , ending_epoch ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir , output_dir )
        accelerator.save_state(output_dir )
        accuracy = evaluation_loop(accelerator , model , eval_dataloader , metric )
        state['accuracy'] = accuracy
        state['lr'] = lr_scheduler.get_lr()[0]
        state['optimizer_lr'] = optimizer.param_groups[0]['lr']
        state['epoch'] = epoch
        state['overall_step'] = overall_step
        accelerator.print(f'epoch {epoch}:' , state )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , 'w' ) as f:
                json.dump(state , f )
def main( ):
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--resume_from_checkpoint' , type=str , default=None , help='If the training should continue from a checkpoint folder.' , )
    parser.add_argument(
        '--partial_train_epoch' , type=int , default=None , help='If passed, the training will stop after this number of epochs.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=2 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 219
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='''size''' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 23
|
from __future__ import annotations
def min_path_sum( matrix : list[list[int]] ):
    """Minimum path sum from top-left to bottom-right, moving only right or down."""
    # preprocessing the first row
    for i in range(1 ,len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 ,len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 ,len(matrix ) ):
        for j in range(1 ,len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] )
    return matrix[-1][-1]
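# Example: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7,
# following the path 1 -> 3 -> 1 -> 1 -> 1.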
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations( examples,out_file,model_name,batch_size = 8,device = DEFAULT_DEVICE,fp16=False,task="summarization",prefix=None,**generate_kwargs,):
    """Run generation over `examples` and write one hypothesis per line to `out_file`."""
    fout = Path(out_file ).open("""w""",encoding="""utf-8""" )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model,task )
    if prefix is None:
        prefix = prefix or getattr(model.config,"""prefix""","""""" ) or """"""
    for examples_chunk in tqdm(list(chunks(examples,batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk,return_tensors="""pt""",truncation=True,padding="""longest""" ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids,attention_mask=batch.attention_mask,**generate_kwargs,)
        dec = tokenizer.batch_decode(summaries,skip_special_tokens=True,clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + """\n""" )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs,4 )}
def datetime_now( ):
    return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def run_generate( verbose=True ):
    """Parse CLI arguments, run generation, and optionally compute BLEU/ROUGE scores."""
    parser = argparse.ArgumentParser()
    parser.add_argument("""model_name""",type=str,help="""like facebook/bart-large-cnn,t5-base, etc.""" )
    parser.add_argument("""input_path""",type=str,help="""like cnn_dm/test.source""" )
    parser.add_argument("""save_path""",type=str,help="""where to save summaries""" )
    parser.add_argument("""--reference_path""",type=str,required=False,help="""like cnn_dm/test.target""" )
    parser.add_argument("""--score_path""",type=str,required=False,default="""metrics.json""",help="""where to save metrics""" )
    parser.add_argument("""--device""",type=str,required=False,default=DEFAULT_DEVICE,help="""cuda, cuda:1, cpu etc.""" )
    parser.add_argument(
        """--prefix""",type=str,required=False,default=None,help="""will be added to the begininng of src examples""" )
    parser.add_argument("""--task""",type=str,default="""summarization""",help="""used for task_specific_params + metrics""" )
    parser.add_argument("""--bs""",type=int,default=8,required=False,help="""batch size""" )
    parser.add_argument(
        """--n_obs""",type=int,default=-1,required=False,help="""How many observations. Defaults to all.""" )
    parser.add_argument("""--fp16""",action="""store_true""" )
    parser.add_argument("""--dump-args""",action="""store_true""",help="""print the custom hparams with the results""" )
    parser.add_argument(
        """--info""",nargs="""?""",type=str,const=datetime_now(),help=(
            """use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
            """ lang=en-ru. If no value is passed, the current datetime string will be used."""
        ),)
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f'''parsed the following generate kwargs: {parsed_args}''' )
    examples = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("""Can't mix --fp16 and --device cpu""" )
    runtime_metrics = generate_summaries_or_translations(
        examples,args.save_path,args.model_name,batch_size=args.bs,device=args.device,fp16=args.fp16,task=args.task,prefix=args.prefix,**parsed_args,)
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if """translation""" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores: dict = score_fn(output_lns,reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores,open(args.score_path,"""w""" ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 343
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        """Tokenize text and/or preprocess images into a single batch encoding."""
        if images is None and text is None:
            raise ValueError("""You have to specify either images or text.""" )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 343
| 1
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
    args = parser.parse_args()
    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
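    # A sketch of how such counts are typically consumed downstream for MLM masking
    # (the 0.7 smoothing exponent is an assumption, not part of this script):
    #
    #     import numpy as np
    #     token_probs = np.maximum(counts, 1) ** -0.7  # rarer tokens get masked more often
    #     token_probs = token_probs / token_probs.sum()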
| 348
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_nllb"""] = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_nllb_fast"""] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 81
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__a = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        return {}, {}, {}
    def preprocess( self , image ):
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("""uint8""" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict
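# Illustrative use (the model name is an example, not fixed by this file):
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"]            # PIL.Image with the rendered depth map
#     result["predicted_depth"]  # raw torch tensor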
| 367
|
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """strength""": 0.75,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 43
| 0
|
"""simple docstring"""
class OverFlowError( Exception ):
    pass
class UnderFlowError( Exception ):
    pass
class FixedPriorityQueue:
    def __init__( self) -> None:
        """Three priority buckets: 0 (highest), 1, and 2."""
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority: int , data: int) -> None:
        """Add an element to a bucket; each bucket holds at most 100 elements."""
        try:
            if len(self.queues[priority]) >= 1_00:
                raise OverflowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')
    def dequeue( self) -> int:
        """Return the front element of the highest-priority non-empty bucket."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')
def __str__( self) -> str:
'''simple docstring'''
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__( self) -> None:
        """The element's value itself is its priority (smaller = higher)."""
        self.queue: list[int] = []
    def enqueue( self , data: int) -> None:
        if len(self.queue) == 1_00:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)
    def dequeue( self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
def __str__( self) -> str:
'''simple docstring'''
return str(self.queue)
def fixed_priority_queue( ) ->None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    print(fpq )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def element_priority_queue( ) ->None:
    epq = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(epq )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
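    # Expected behaviour: the fixed queue drains priority 0 first (10, 100, 128, 70, 7, ...),
    # while the element queue always yields the current minimum (1, 4, 5, 7, 10, ...).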
| 290
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class RobertaPreLayerNormConfig( PretrainedConfig ):
    model_type = """roberta-prelayernorm"""
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
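# Note (editorial): marking the batch/sequence (and, for multiple-choice, the
# choice) dimensions as dynamic lets a single exported ONNX graph serve inputs
# of any batch size and sequence length.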
| 290
| 1
|
"""simple docstring"""
def solution(limit : int = 1_0_0_0_0_0_0 ) ->int:
    # Sieve of Eratosthenes over the odd numbers
    primes = set(range(3, limit, 2 ) )
    primes.add(2 )
    for p in range(3, limit, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p ) ) )
    # Euler's totient via the product formula: phi(n) = n * prod(1 - 1/p)
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p, limit + 1, p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
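# Sanity check (editorial, not in the original module): solution(8) == 21, the
# number of reduced proper fractions n/d with d <= 8 (Project Euler 72).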
if __name__ == "__main__":
print(F'{solution() = }')
| 296
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token = 50256
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
| 296
| 1
|
def valid_connection( graph : list[list[int]] , next_ver : int , curr_ind : int , path : list[int] ) -> bool:
    """simple docstring"""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle( graph : list[list[int]] , path : list[int] , curr_ind : int ) -> bool:
    """simple docstring"""
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle( graph : list[list[int]] , start_index : int = 0 ) -> list[int]:
    """simple docstring"""
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph , path , 1 ) else []
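# Illustrative usage (editorial example; graph and expected output follow the
# classic 5-vertex case for this algorithm):
# >>> graph = [[0, 1, 0, 1, 0],
# ...          [1, 0, 1, 1, 1],
# ...          [0, 1, 0, 0, 1],
# ...          [1, 1, 0, 0, 1],
# ...          [0, 1, 1, 1, 0]]
# >>> hamilton_cycle(graph)
# [0, 1, 2, 4, 3, 0]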
| 219
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting ( DiffusionPipeline ):
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                """to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
                """ in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
                """ it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
                """ file"""
            )
            deprecate("""steps_offset!=1""" , """1.0.0""" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["""steps_offset"""] = 1
            scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                """ `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
                """ sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
                """ incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
                """ Hub, it would be very nice if you could open a Pull request for the"""
                """ `scheduler/scheduler_config.json` file"""
            )
            deprecate("""skip_prk_steps not set""" , """1.0.0""" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["""skip_prk_steps"""] = True
            scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        """simple docstring"""
        self.enable_attention_slicing(None )
    def enable_sequential_cpu_offload( self ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device("""cuda""" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """simple docstring"""
        if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 5_12 , width : int = 5_12 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        """simple docstring"""
        # Use CLIPSeg to produce a segmentation mask from the text query
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 219
| 1
|
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
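# Example invocation (editorial; paths are placeholders):
# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert/model.ckpt-best \
#     --albert_config_file ./albert/albert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin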
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 61
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """cvt"""
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 1_92, 3_84] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
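    # Design note (editorial): CvT is a three-stage architecture, so most of the
    # arguments above are per-stage lists of length 3 (e.g. embed_dim=[64, 192, 384]).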
| 61
| 1
|
def hex_to_bin( hex_num: str ) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
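# Illustrative examples (editorial; values verified by hand):
#     hex_to_bin("AC")  -> 10101100
#     hex_to_bin("-66") -> -1100110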
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
|
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
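# Editorial note: every case above is expected to yield only ["os"] -- imports
# wrapped in try/except (the optional-dependency pattern) are deliberately
# skipped by get_imports.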
| 319
| 0
|
def solution( n = 1_0_0_0 ) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 352
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
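    # Design note (editorial): attribute_map lets callers read the canonical
    # names (hidden_size, num_hidden_layers, ...) while checkpoints keep the
    # historical GPT-2 style names (n_embd, n_layer, ...).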
    def __init__( self , vocab_size=5_0257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 252
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
snake_case_ : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
@dataclass
class DataTrainingArguments :
    train_file: Optional[str] = field(default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    validation_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. If passed, sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            """help""": (
                """Whether to pad all samples to the maximum sentence length. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
                """efficient on GPU but very bad for TPU."""
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self ,features ):
        '''simple docstring'''
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['input_ids'] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
        # Un-flatten
        batch = {k: v.view(batch_size ,num_choices ,-1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels ,dtype=torch.int64 )
        return batch
def A__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4 )]
    context_name = 'sent1'
    question_header_name = 'sent2'
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
            max_seq_length = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 83
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1_024 , encoder_layers=6 , encoder_ffn_dim=1_024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
    @property
    def num_attention_heads( self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self) -> int:
        return self.d_model
    def to_dict( self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 43
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Tuple = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : int = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
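# Note (illustrative, not extra API): once the lazy module is installed in sys.modules,
# `from transformers.models.nllb_moe import NllbMoeConfig` resolves immediately, while the
# torch-backed modeling classes are only imported on first attribute access.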
| 69
|
"""simple docstring"""
from typing import Any
class Node:
    """A singly linked list node."""
    def __init__( self , data ):
        """Store the value and initialise the next pointer."""
        self.data = data
        self.next = None
class LinkedList:
    """A minimal singly linked list supporting push, print and value swap."""
    def __init__( self ):
        """Start with an empty list."""
        self.head = None
    def print_list( self ):
        """Print the list values separated by spaces."""
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push( self , new_data ):
        """Insert a new node at the head of the list."""
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self , node_data_a , node_data_b ):
        """Swap the data of the first nodes holding the two given values."""
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
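    # Expected output of the demo above (1 is pushed last, so it ends up at the head):
    #   1 2 3 4 5
    #   After swapping
    #   4 2 3 1 5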
| 69
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
    '''Builds keyword arguments and expectations for LevitImageProcessor tests.'''
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,do_center_crop=True ,crop_size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,) -> None:
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> dict:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''Tests for LevitImageProcessor.'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = LevitImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,"""image_mean""" ) )
        self.assertTrue(hasattr(image_processing ,"""image_std""" ) )
        self.assertTrue(hasattr(image_processing ,"""do_normalize""" ) )
        self.assertTrue(hasattr(image_processing ,"""do_resize""" ) )
        self.assertTrue(hasattr(image_processing ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing ,"""size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
    def test_batch_feature( self ) -> None:
        pass
    def test_call_pil( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
    def test_call_numpy( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
    def test_call_pytorch( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        encoded_images = image_processing(image_inputs ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
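# A hedged way to run just this module (the file path is illustrative, not fixed by this row):
#   python -m pytest tests/models/levit/test_image_processing_levit.py -q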
| 296
|
from pathlib import Path
import fire
def minify ( src_dir , dest_dir , n ) -> None:
    '''Write the first n lines of every file in src_dir into dest_dir.'''
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open("""w""" ).write("""\n""".join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
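# Hedged CLI sketch via python-fire (assuming this file is saved as minify.py;
# paths and the line count are illustrative):
#   python minify.py /path/to/src /path/to/dest 100
# keeps the first 100 lines of every file under /path/to/src.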
| 296
| 1
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline( Pipeline ):
    def get_masked_index( self , input_ids ):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("""Unsupported framework""" )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def ensure_exactly_one_mask_token( self , model_inputs ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        model_outputs["""input_ids"""] = model_inputs["""input_ids"""]
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["""input_ids"""][0]
        outputs = model_outputs["""logits"""]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ):
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["""input_ids"""]
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        """We cannot replace it with anything meaningful, ignoring it""" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("""At least one target must be provided when passed.""" )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["""target_ids"""] = target_ids
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
        return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
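# A hedged usage sketch (the `pipeline` factory is the standard transformers entry point;
# the model name below is illustrative, not defined in this file):
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=2, targets=["capital", "heart"])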
| 358
|
def check_cycle (graph :dict ) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search (graph :dict , vertex :int , visited :set , rec_stk :set ) -> bool:
    """Recursive DFS that reports a cycle when a back edge is found."""
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
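    # Quick sanity checks on illustrative graphs (added for illustration, not original tests):
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True  # back edge 2 -> 0 closes a cycle
    assert check_cycle({0: [1], 1: [2], 2: []}) is False  # a simple acyclic chain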
| 87
| 0
|
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *__lowerCamelCase, **__lowerCamelCase ):
return AutoConfig.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *__lowerCamelCase, **__lowerCamelCase ):
return AutoTokenizer.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def model( *__lowerCamelCase, **__lowerCamelCase ):
return AutoModel.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *__lowerCamelCase, **__lowerCamelCase ):
return AutoModelForCausalLM.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *__lowerCamelCase, **__lowerCamelCase ):
return AutoModelForMaskedLM.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *__lowerCamelCase, **__lowerCamelCase ):
return AutoModelForSequenceClassification.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *__lowerCamelCase, **__lowerCamelCase ):
return AutoModelForQuestionAnswering.from_pretrained(*__lowerCamelCase, **__lowerCamelCase )
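# Hedged usage sketch (these are torch.hub entry points; the checkpoint name is illustrative):
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mlm = torch.hub.load("huggingface/transformers", "modelForMaskedLM", "bert-base-uncased")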
| 61
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def get_maskformer_config( model_name ):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    config.id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key( dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict, backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict, config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[: hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[: hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path, "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_swin_q_k_v(state_dict, config.backbone_config )
    read_in_decoder_q_k_v(state_dict, config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 6_5535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
    inputs = image_processor(image, return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:", outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(f"""nielsr/{model_name}""" )
        image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
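# Hedged CLI sketch (paths are illustrative; flags mirror the argparse setup above):
#   python convert_maskformer_checkpoint.py --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl --pytorch_dump_folder_path ./converted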
| 61
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict (config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    """Build the standard input dict, filling in default masks where needed."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64)) , -1)
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['''input_ids'''])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['''input_ids'''])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}')
@require_flax
class FlaxBlenderbotHeadTests( unittest.TestCase ):
    vocab_size = 9_9
    def _get_config_and_data( self ):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape)
    def test_lm_uneven_forward( self ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64)
        outputs = lm_model(input_ids=context , decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , expected_shape)
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64)
        shifted = shift_tokens_right(input_ids , 1 , 2)
        n_pad_before = np.equal(input_ids , 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted , 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape , input_ids.shape)
        self.assertEqual(n_pad_after , n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict)
    def test_use_cache_forward_with_attn_mask( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict)
    def test_encode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask)
                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs) , len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs) , len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''')
@slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        FASTER_GEN_KWARGS = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        TOK_DECODE_KW = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''')
        src_text = ['''Sam''']
        model_inputs = tokenizer(src_text , return_tensors='''jax''')
        generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances , **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 365
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible (num ) -> bool:
    """Check the Project Euler 43 substring-divisibility property for a digit tuple."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution (n = 10 ) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
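# Worked check against the example in the Project Euler 43 statement: for 1406357289,
# d2d3d4 = 406 (divisible by 2), d3d4d5 = 063 (by 3), d4d5d6 = 635 (by 5), d5d6d7 = 357 (by 7),
# d6d7d8 = 572 (by 11), d7d8d9 = 728 (by 13) and d8d9d10 = 289 (by 17).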
| 318
| 0
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImgaImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 26
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
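# Illustrative invocations of the two modes above (the script name and argument values
# here are hypothetical, shown only to make the BYO vs. on-demand contract concrete):
#   BYO cluster:
#     python run_on_remote.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
#         pytorch/text-generation/run_generation.py --model_name_or_path gpt2
#   On-demand cluster:
#     python run_on_remote.py --instance V100:1 --provider cheapest \
#         pytorch/text-generation/run_generation.py --model_name_or_path gpt2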
parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
cluster = rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
cluster = rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
| 252
| 0
|
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
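# A quick worked check of the heuristic built above (pure Manhattan distance,
# abs(i - goal[0]) + abs(j - goal[1]), with goal = [4, 5] for this grid):
#   heuristic[0][0] = 4 + 5 = 9 and heuristic[4][5] = 0, with obstacle cells bumped to 99,
#   so search() always pops the open cell with the smallest f = g + heuristic first.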
| 352
|
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 195
| 0
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 69
|
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
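# Sanity check of the pattern (verifiable by hand):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]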
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 69
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
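# Note on the defaults built above: attention_mask marks every non-pad input token, while
# decoder_attention_mask always attends to the first decoder position (the start token)
# and masks pad tokens only from position 1 onward.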
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 276
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276
|
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result
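# How the recursion above counts: digits are assigned in symmetric pairs working inward,
# `remainder` carries the running carry of the pair sums, and a number is counted only if
# every digit of n + reverse(n) comes out odd (hence the parity checks on `remainder`).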
def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 87
| 0
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
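# Example of the chained replacements (follows mechanically from the rules above):
#   rename_key("img_encoder.layers.0.blocks.0.norm1.weight")
#   -> "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"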
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size

            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size

            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default="groupvit-gcc-yfcc",
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 364
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147
| 0
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
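    # Shape sketch for _decode_audio above (illustrative numbers, not taken from the source):
    # audio_values (bsz=2, channels=1, seq_len=6) with padding_mask (2, 4) -> mask padded to
    # (2, 6) using the non-padding token, then each sample keeps only positions where the
    # mask differs from the padding value and is reshaped back to (channels, -1).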
| 15
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
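# Note: a class name such as "ViTFeatureExtractor" can appear under several model types in
# the mapping above (e.g. clipseg, nat, vit); the loop simply tries each candidate module
# until the attribute lookup succeeds, then falls back to registered extra content and the
# main transformers init.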
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead.")
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 318
| 0
|
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (zero included), False otherwise."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
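# Why the bit trick works: a positive power of two has a single set bit, so subtracting 1
# flips that bit and sets all lower bits, making n & (n - 1) == 0. Note that 0 & -1 == 0
# as well, so 0 is also reported as a power of two by this check.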
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, )
| 291
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Optional[Any] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __snake_case ( PretrainedConfig ):
_a : Tuple= "trocr"
_a : Optional[int]= ["past_key_values"]
_a : str= {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
    def __init__( self ,vocab_size=50265 ,d_model=1024 ,decoder_layers=12 ,decoder_attention_heads=16 ,decoder_ffn_dim=4096 ,activation_function="gelu" ,max_position_embeddings=512 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,decoder_start_token_id=2 ,init_std=0.02 ,decoder_layerdrop=0.0 ,use_cache=True ,scale_embedding=False ,use_learned_position_embeddings=True ,layernorm_embedding=True ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,decoder_start_token_id=decoder_start_token_id ,**kwargs ,)
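# --- illustrative usage sketch ------------------------------------------------
# Instantiating the config above with a smaller decoder; the `hidden_size` read
# resolves through `attribute_map`, which aliases it to `d_model`.
def _demo_trocr_config():
    config = __snake_case(d_model=256 , decoder_layers=6 , decoder_attention_heads=8 )
    print(config.hidden_size )  # 256, via the attribute_map alias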
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case = "auto" ):
if slice_size == "auto":
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
self.enable_attention_slicing(snake_case )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        # transcribe the input audio with the speech model and use the text as prompt
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=48_0000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps )}.''' )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F'''`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt )} !='''
                    F''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        # scale and decode the latents with the VAE (0.18215 is the SD latent scaling factor)
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
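# --- illustrative usage sketch ------------------------------------------------
# Driving the pipeline end to end; `pipe` is assumed to be an instance of the
# class above assembled from pretrained components, and the silent clip is a
# stand-in for real speech.
def _demo_speech_to_image(pipe):
    import numpy as np
    audio = np.zeros(16_000 , dtype=np.float32 )  # one second of silence at 16 kHz
    output = pipe(audio , sampling_rate=16_000 , num_inference_steps=25 )
    output.images[0].save('speech_to_image.png' )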
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
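# A quick sanity check of the byte-to-unicode table (illustrative only):
def _check_bytes_to_unicode():
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder ) == 256          # every byte gets a printable character
    assert byte_encoder[ord("A" )] == "A"     # printable ASCII maps to itself
    assert byte_encoder[0] == chr(256 )       # control bytes are shifted past 255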
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
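# Illustration: the set of adjacent symbol pairs over a three-symbol word,
# exactly what the BPE merge loop below consumes.
def _demo_get_pairs():
    assert get_pairs(("l", "o", "w") ) == {("l", "o"), ("o", "w")}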
class a (PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __snake_case ( self : List[Any] ) -> Union[str, Any]:
return len(self.encoder )
def __snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
def __snake_case ( self : Optional[int] , lowerCamelCase : List[str] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def __snake_case ( self : str , lowerCamelCase : Tuple ) -> int:
return self.decoder.get(lowerCamelCase )
def __snake_case ( self : Dict , lowerCamelCase : List[Any] ) -> Tuple:
__snake_case : List[Any] = "".join(lowerCamelCase )
__snake_case : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> Tuple:
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
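# --- illustrative helper (not part of the tokenizer class) ---------------------
# A standalone mimic of the right-padding branch of `_pad` for the global mask,
# to make the `-1` filler rule above concrete.
def _demo_pad_global_attention_mask():
    mask , target_len = [1, 0, 0, 0] , 6
    padded = mask + [-1] * (target_len - len(mask ))
    assert padded == [1, 0, 0, 0, -1, -1]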
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class a (PretrainedConfig ):
    """simple docstring"""
    model_type = "masked_bert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
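# --- illustrative usage sketch ------------------------------------------------
# Building a small pruning-aware config; the field names are exactly the
# attributes set above, the values are arbitrary.
def _demo_masked_bert_config():
    config = a(hidden_size=256 , num_hidden_layers=4 , pruning_method="topK" )
    print(config.pruning_method , config.mask_scale )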
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , num_labels=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> List[str]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> ConvNextVaConfig:
        '''simple docstring'''
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ) -> List[Any]:
        '''simple docstring'''
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> List[str]:
        '''simple docstring'''
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> Any:
        '''simple docstring'''
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> None:
        '''simple docstring'''
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def test_inputs_embeds( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def test_model_common_attributes( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def test_feed_forward_chunking( self ) -> List[Any]:
'''simple docstring'''
pass
    def test_training( self ) -> None:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ) -> None:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ) -> None:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ) -> None:
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ) -> None:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> None:
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> "Image.Image":
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> None:
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
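# --- illustrative usage sketch ------------------------------------------------
# The same inference path as the integration test above, compressed into one
# helper (network access to the checkpoint is assumed).
def _demo_convnextva_inference():
    image = prepare_img()
    preprocessor = AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" )
    model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" )
    inputs = preprocessor(images=image , return_tensors="""pt""" )
    with torch.no_grad():
        logits = model(**inputs ).logits
    print(model.config.id2label[logits.argmax(-1 ).item()] )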
'''simple docstring'''
from math import isqrt
def is_prime(number : int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution(max_prime : int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"{solution() = }")
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
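# --- illustrative usage sketch ------------------------------------------------
# Building the default masks for a tiny batch; the config values are arbitrary
# small numbers, only the keyword names matter.
def _demo_prepare_inputs():
    config = MaMaaaConfig(vocab_size=99 , d_model=16 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=4 , decoder_attention_heads=4 )
    input_ids = torch.tensor([[5, 6, 7, 2]] )
    decoder_input_ids = torch.tensor([[2, 5, 6, 7]] )
    inputs = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
    print(sorted(inputs.keys() ) )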
class MaMaaaModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config( self ) -> MaMaaaConfig:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ) -> None:
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict['''input_ids''']
        attention_mask = inputs_dict['''attention_mask''']
        head_mask = inputs_dict['''head_mask''']
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ) -> None:
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
        encoder_last_hidden_state_a = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    def setUp( self ) -> None:
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
    def test_config( self ) -> None:
self.config_tester.run_common_tests()
    def test_save_load_strict( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info['''missing_keys'''] , [] )
    def test_decoder_model_past_with_large_inputs( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_encoder_decoder_model_standalone( self ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    def test_inputs_embeds( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            if not self.is_encoder_decoder:
                input_ids = inputs['''input_ids''']
                del inputs["input_ids"]
            else:
                input_ids = inputs['''input_ids''']
                decoder_input_ids = inputs.get('''decoder_input_ids''' , input_ids )
                del inputs["input_ids"]
                inputs.pop('''decoder_input_ids''' , None )
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs['''inputs_embeds'''] = wte(input_ids )
            else:
                inputs['''inputs_embeds'''] = wte(input_ids )
                inputs['''decoder_inputs_embeds'''] = wte(decoder_input_ids )
            with torch.no_grad():
                model(**inputs )[0]
    def test_generate_fp16( self ) -> None:
        config , input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        model = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor( tok_lst ) -> torch.Tensor:
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_tokenizer( self ):
        return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
    def test_inference_no_head( self ) -> None:
        model = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(torch_device )
        input_ids = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, 10_24) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ) -> None:
        model = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(torch_device )
        # change to intended input
        input_ids = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
        decoder_input_ids = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ) -> None:
        model = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(torch_device )
        tokenizer = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
        src_fr = [
            '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
            '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
            '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
            ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
            ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr , padding=True , return_tensors='''pt''' )
        hypotheses_batch = model.generate(
            input_ids=dct['''input_ids'''].to(torch_device ) , attention_mask=dct['''attention_mask'''].to(torch_device ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
        expected_en = [
            '''The NSA case highlights the total absence of intelligence debate''',
            '''I think there are two levels of response from the French government.''',
            '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
            ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
            ''' communications in France.''',
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True )
        assert generated == expected_en
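# --- illustrative usage sketch ------------------------------------------------
# A minimal translation call with the same checkpoint (network access assumed):
def _demo_translation():
    tokenizer = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
    model = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' )
    batch = tokenizer('''La vie est belle.''' , return_tensors='''pt''' )
    generated = model.generate(**batch , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) )
    print(tokenizer.batch_decode(generated , skip_special_tokens=True ) )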
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key( key ) -> str:
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCAmelCase__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCAmelCase__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCAmelCase__ = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCAmelCase__ = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCAmelCase__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
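# A couple of concrete renames performed by `replace_key` (illustrative):
def _demo_replace_key():
    assert replace_key('''vqvae.encoders.0.k''' ) == '''vqvae.encoders.0.codebook'''
    assert replace_key('''prior.x_out''' ) == '''prior.fc_proj_out'''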
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ) -> dict:
    new_dict = {}
import re
    re_encoder_block_conv_in = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_encoder_block_resnet = re.compile(
        R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_encoder_block_proj_out = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_conv_out = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
    re_decoder_block_resnet = re.compile(
        R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_decoder_block_proj_in = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_conv_out = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
    re_prior_cond_resnet = re.compile(
        R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
    re_prior_cond_proj_in = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'''1''': 1, '''3''': 2}[groups[-2]]
            prefix = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[F"""{key_prefix}.{key}"""]
            print(F"""{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None , pytorch_dump_folder_path=None ):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            r = requests.get(F"""{PREFIX}{file}""" , allow_redirects=True )
            os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=True )
            open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , '''wb''' ).write(r.content )

    model_to_convert = MODEL_MAPPING[model_name.split('''/''' )[-1]]

    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['''model''']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''' , '''bias''' )] = old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''' , '''weight''' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''' , '''.model.''' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = '''vqvae''' if i == 0 else F"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )

    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
        json.dump(mapping , txtfile )

    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )

    return weight_dict
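

# Hypothetical programmatic usage (argument values are illustrative; the argparse
# CLI below is the supported entry point):
#   convert_openai_checkpoint("jukebox-5b-lyrics", "jukebox-5b-lyrics-converted")
# This downloads the raw checkpoints listed in MODEL_MAPPING, remaps every key,
# and saves a ready-to-load HF model plus a mapping.json audit trail.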
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
__UpperCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 228
| 1
|
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_directory , "words.txt" )

    words = ""
    with open(words_file_path ) as f:
        words = f.readline()

    words = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
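    # Worked example: t_n = n * (n + 1) / 2, so t_10 = 55, and the word "SKY"
    # scores 19 + 11 + 25 = 55 -- a triangle word that counts toward the total.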
| 250
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 1_0, """max_num_jobs""": 1}, [range(1_0 )]),
({"""num_shards""": 1_0, """max_num_jobs""": 1_0}, [range(lowerCAmelCase__ , i + 1 ) for i in range(1_0 )]),
({"""num_shards""": 1, """max_num_jobs""": 1_0}, [range(1 )]),
({"""num_shards""": 1_0, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]),
({"""num_shards""": 3, """max_num_jobs""": 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected ):
    """simple docstring"""
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 1_0, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected ):
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected ):
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
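

# For reference, _distribute_shards splits num_shards indices into at most
# max_num_jobs contiguous ranges, larger ranges first -- e.g. 10 shards over
# 3 jobs yields [range(0, 4), range(4, 7), range(7, 10)], exactly as the
# parametrized cases above encode.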
| 147
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester ):
    def create_and_test_config_common_properties(self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
class LevitModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model(self , config , pixel_values , labels ):
        """simple docstring"""
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
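    # Sanity check on the shape math above: with the tester defaults
    # (image_size=64, kernel_size=3, stride=2, padding=1), each stride-2 conv maps
    # a side of size s to floor((s + 2 - 3) / 2 + 1), so 64 -> 32 -> 16 -> 8 -> 4.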
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = LevitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
"""simple docstring"""
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
    def test_model_common_attributes(self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Levit does not output attentions""" )
    def test_attention_outputs(self ):
"""simple docstring"""
pass
    def test_forward_signature(self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output(self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self ):
"""simple docstring"""
pass
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_training(self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]

                    model = model_class(config )
                    model.to(torch_device )
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )

                    if problem_type["num_labels"] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )

                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}''' )

                    loss.backward()
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head(self ):
        """simple docstring"""
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 352
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
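        # With the defaults above (image_size=10, patch_size=2, num_frames=2):
        # (10 // 2) ** 2 = 25 patches per frame, so seq_length = 2 * 25 + 1 = 51.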
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self ):
        """simple docstring"""
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification(self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()

        result = model(pixel_values )

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )

    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )

        return inputs_dict
    def test_config(self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
"""simple docstring"""
pass
    def test_model_common_attributes(self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_video_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs(self ):
        """simple docstring"""
        if not self.has_attentions:
            pass
        else:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs )

                # Check attention is always last and order is fine
                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

                self.assertEqual(out_len + 1 , len(outputs ) )

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output(self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict , config , model_class )
def prepare_video():
    """simple docstring"""
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification(self ):
        """simple docstring"""
        model = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
            torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors="""pt""" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 4_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 13
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__A =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
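

# MAPPING translates fairseq parameter names to their HF counterparts; the "*"
# placeholder is substituted later with the encoder layer index parsed from the
# fairseq name. TOP_LEVEL_KEYS live outside the wav2vec2_conformer submodule and
# therefore skip the submodule prefix in recursively_load_weights.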
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
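


# recursively_load_weights walks the fairseq state dict: feature-extractor conv
# weights are routed through load_conv_layer, everything else is matched against
# MAPPING and assigned via set_recursively; unmatched tensors are only logged.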
def recursively_load_weights(fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path , hidden_act="swish" )
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_wav2vec = Wav2Vec2ConformerForCTC(config )
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config )

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )

    model = model[0].eval()

    recursively_load_weights(model , hf_wav2vec , not is_finetuned )

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__A =parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 19
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor ):
    '''simple docstring'''

    model_input_names = ["audio_values", "audio_mask"]
    def __init__( self , spectrogram_length=2_048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44_100 , hop_length_to_sampling_rate=86 , n_fft=2_048 , padding_value=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features(self , waveform ):
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
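    # Note on the normalization above: with db_range=80 the spectrogram spans
    # 80 dB; subtracting 20 dB, dividing by 40 dB and clipping to [-2, 0] before
    # adding 1 squashes the log-mel values into the fixed range [-1, 1].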
    def __call__( self , raw_speech , return_tensors=None , return_attention_mask=True , sampling_rate=None , resample=False , mask_audio=False , **kwargs , ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}

        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )

        return encoded_inputs
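

# Minimal usage sketch (values are illustrative assumptions, not from this file):
#   extractor = TvltFeatureExtractor()
#   batch = extractor(np.zeros(44_100), sampling_rate=44_100, return_tensors="np")
#   batch["audio_values"].shape  # -> (1, 1, max_time_len, 128)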
| 291
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__UpperCAmelCase ={"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356
|
'''simple docstring'''
__UpperCAmelCase ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase = input('''Enter message: ''' )
__lowerCamelCase = input('''Enter key [alphanumeric]: ''' )
__lowerCamelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__lowerCamelCase = '''encrypt'''
__lowerCamelCase = encrypt_message(UpperCamelCase__ , UpperCamelCase__ )
elif mode.lower().startswith('''d''' ):
__lowerCamelCase = '''decrypt'''
__lowerCamelCase = decrypt_message(UpperCamelCase__ , UpperCamelCase__ )
print(f"""\n{mode.title()}ed message:""" )
print(UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
return translate_message(UpperCamelCase__ , UpperCamelCase__ , '''encrypt''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
return translate_message(UpperCamelCase__ , UpperCamelCase__ , '''decrypt''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = key.upper()
for symbol in message:
__lowerCamelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCamelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCamelCase__ ):
__lowerCamelCase = 0
else:
translated.append(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 237
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
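# A minimal sketch of what the lazy-import pattern above buys (hypothetical
# usage; assumes a standard transformers installation with torch available):
import transformers.models.electra as electra

config = electra.ElectraConfig()  # only now is configuration_electra actually imported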
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_with_config_name(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase_ ( __UpperCamelCase, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[int] = CpmAntTokenizer
__UpperCamelCase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
super().setUp()
UpperCamelCase : Optional[Any] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
UpperCamelCase : Any = """今天天气真好!"""
UpperCamelCase : Dict = ["""今天""", """天气""", """真""", """好""", """!"""]
UpperCamelCase : int = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : Dict = """今天天气真好!"""
UpperCamelCase : List[Any] = [tokenizer.bos_token] + tokens
UpperCamelCase : List[str] = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
UpperCamelCase : Any = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
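# Two spot checks for the median helper above (plain asserts, runnable as-is):
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5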
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
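# Hedged usage sketch: the run id below is made up, and a real call needs
# network access (plus a token for private repositories):
times = get_job_time("1234567890")
for name, info in sorted(times.items(), key=lambda kv: kv[1]["duration"], reverse=True):
    print(f"{name}: {info['duration']} min")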
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for ``query`` and save up to ``max_images`` full-size results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
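# Hedged usage sketch: this performs live HTTP requests, and Google may change
# its result markup at any time, so treat it as illustrative only:
count = download_images_from_google_query("sunset", max_images=3)
print(f"{count} images were downloaded to disk.")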
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
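# Hypothetical invocation of the tool above (assumes the transformers tools
# runtime is available; the checkpoint is downloaded on first use):
summarizer = TextSummarizationTool()
print(summarizer("Hugging Face hosts models, datasets, and demos on a shared hub. ..."))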
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCAmelCase : Any = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
SCREAMING_SNAKE_CASE_: Dict = max_length
SCREAMING_SNAKE_CASE_: List[Any] = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: str = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , flax_model.params)
SCREAMING_SNAKE_CASE_: List[Any] = flax_model.generate(lowerCAmelCase__).sequences
SCREAMING_SNAKE_CASE_: str = pt_model.generate(torch.tensor(lowerCAmelCase__ , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_: List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[Any] = True
SCREAMING_SNAKE_CASE_: Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Dict = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
SCREAMING_SNAKE_CASE_: Optional[int] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: int = max_length
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Tuple = True
SCREAMING_SNAKE_CASE_: List[str] = max_length
SCREAMING_SNAKE_CASE_: Any = 0.8
SCREAMING_SNAKE_CASE_: Any = 10
SCREAMING_SNAKE_CASE_: List[str] = 0.3
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: int = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: List[str] = 2
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Tuple = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Any = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: List[Any] = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[int] = True
SCREAMING_SNAKE_CASE_: Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE_: Dict = attention_mask.at[(0, 0)].set(0)
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
SCREAMING_SNAKE_CASE_: Any = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = model.generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__ , attention_mask=lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
SCREAMING_SNAKE_CASE_: List[Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
SCREAMING_SNAKE_CASE_: Optional[int] = "Hello world"
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="np").input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase__ , "do_samples"):
model.generate(lowerCAmelCase__ , do_samples=lowerCAmelCase__)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase__ , "foo"):
SCREAMING_SNAKE_CASE_: str = {"foo": "bar"}
model.generate(lowerCAmelCase__ , **lowerCAmelCase__)
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCamelCase = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : str=None , ) -> str:
if attention_mask is None:
SCREAMING_SNAKE_CASE = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=0.02 , ) -> Dict:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = initializer_range
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
SCREAMING_SNAKE_CASE = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
SCREAMING_SNAKE_CASE = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
SCREAMING_SNAKE_CASE = 20
SCREAMING_SNAKE_CASE = model_class_name(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = 20
SCREAMING_SNAKE_CASE = model_class_name(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = 9_9
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
SCREAMING_SNAKE_CASE = input_ids.shape[0]
SCREAMING_SNAKE_CASE = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_config_and_data()
SCREAMING_SNAKE_CASE = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = lm_model(input_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE = lm_model(input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowerCAmelCase__ )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
SCREAMING_SNAKE_CASE = np.equal(lowerCAmelCase__ , 1 ).astype(np.floataa ).sum()
SCREAMING_SNAKE_CASE = np.equal(lowerCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase , lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = FlaxBlenderbotModelTester(self )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE = encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE = encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE = decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE = decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
SCREAMING_SNAKE_CASE = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
SCREAMING_SNAKE_CASE = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
SCREAMING_SNAKE_CASE = ['Sam']
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='jax' )
SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'Sam is a great name. It means "sun" in Gaelic.'
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , **lowerCAmelCase__ )
assert generated_txt[0].strip() == tgt_text
"""simple docstring"""
from math import sqrt
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
SCREAMING_SNAKE_CASE = 0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE_ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE_ ):
total += i
return total - n
def lowercase (SCREAMING_SNAKE_CASE_ : int = 1_00_00 ) -> int:
SCREAMING_SNAKE_CASE = sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE_ )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE_ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
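# Checks against the smallest amicable pair, (220, 284):
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(300) == 220 + 284  # the only amicable numbers below 300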
"""simple docstring"""
import enum
import shutil
import sys
_a , _a : int = shutil.get_terminal_size()
_a : Optional[Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class __A ( enum.Enum ):
_UpperCamelCase : int = 0
_UpperCamelCase : List[str] = 1
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Tuple="" ) -> Optional[int]:
sys.stdout.write(str(_lowerCamelCase ) + end )
sys.stdout.flush()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Union[str, Any]="" ) -> Optional[int]:
forceWrite(f"\u001b[{color}m{content}\u001b[0m" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
forceWrite("""\r""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : str ) -> Optional[Any]:
forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def SCREAMING_SNAKE_CASE ( ) -> Any:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)

        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    # Days of the week keyed by Zeller's result f
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    # Maps datetime.weekday() (Monday == 0) onto Zeller's numbering (Sunday -> 1, ..., Saturday -> 0)
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
UpperCAmelCase_ = parser.parse_args()
zeller(args.date_input)
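

# A minimal usage sketch for zeller above: January 31, 2010 fell on a Sunday,
# so the function should report "Sunday" for that input.
def _demo_zeller() -> None:
    assert zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"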
| 12
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 315
| 0
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Recursive factorial, memoised with functools.lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
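

# Minimal usage sketch: with lru_cache, repeated calls reuse earlier results
# instead of recursing from scratch.
def _demo_factorial() -> None:
    assert factorial(5) == 120
    assert factorial(0) == 1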
| 359
|
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 27
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6,
        observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1,
        resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02,
        layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256,
        eos_token_id=50256, **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
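

def _demo_trajectory_config() -> None:
    # Minimal usage sketch (illustrative values, not from a released checkpoint):
    # attribute_map lets the generic names resolve to n_layer / n_embd above.
    config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=32)
    assert config.num_hidden_layers == 2
    assert config.hidden_size == 32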
| 81
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 81
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
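

# Minimal usage sketch for the processor above. The checkpoint name is an
# assumption for illustration; `audio_array` would be a mono waveform sampled
# at the rate the feature extractor expects (48 kHz for CLAP).
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"], audios=audio_array, sampling_rate=48000, return_tensors="pt"
#   )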
| 319
| 1
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10,
        cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25, autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a cached batch of time-series features from the Hub and load it."""
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 38
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 38
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 351
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 150
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32,
        attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
        eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
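

def _demo_rwkv_config() -> None:
    # Minimal usage sketch: attention_hidden_size and intermediate_size left as
    # None are derived from hidden_size, as the __init__ above shows.
    config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
    assert config.attention_hidden_size == 512
    assert config.intermediate_size == 4 * 512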
| 19
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 19
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be given in ascending order of degree.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
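
# A minimal usage sketch of the class above (values chosen for illustration):
# p = Polynomial(2, [1, 2, 3])   # 3x^2 + 2x + 1; coefficients in ascending degree order
# q = Polynomial(1, [1, 1])      # x + 1
# str(p + q)                     # '3x^2 + 3x + 2'
# p.evaluate(2)                  # 17
# str(p.derivative())            # '6x + 2'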
| 366
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve matrix * x = vector by Gaussian elimination with partial
    pivoting, returning x as a column matrix.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # copy matrix and vector into the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Return the lowest-degree polynomial (as a callable) passing through
    the points (1, y_list[0]), (2, y_list[1]), ...
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect term of each optimum polynomial fitted to the
    first k data points of func, for k = 1 .. order.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
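
# A small sanity sketch (illustrative, mirrors the problem statement's cubic example):
# for u(n) = n^3 the first terms are 1, 8, 27, ...; interpolate([1, 8]) fits the line
# 7n - 6 through (1, 1) and (2, 8), so interpolate([1, 8])(3) == 15, the first
# incorrect term that solution() accumulates.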
| 322
| 0
|
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
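
# Quick checks (illustrative):
# minimum_squares_to_represent_a_number(12)  # 3  (4 + 4 + 4)
# minimum_squares_to_represent_a_number(13)  # 2  (4 + 9)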
| 97
|
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """
    Few-shot named entity recognition: scores each query token as an entity
    start/end by attending over encoded support examples.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # encode the query and all support examples with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
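
# Sketch of intended use (illustrative; W_query / W_supports are tokenized batches, and
# W_supports additionally carries "sizes", "start_token_id" and "end_token_id" tensors):
# model = FSNERModel()
# p_starts, p_ends = model(W_query, W_supports)   # per-token start/end scores for the query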
| 27
| 0
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 257
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
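
# Illustrative behavior of the DummyObject pattern: when any of the listed backends is
# missing, instantiating or touching the placeholder raises an ImportError that names the
# requirement, e.g.:
# UpperCamelCase__()   # ImportError mentioning transformers, torch and note_seq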
| 257
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roberta_fast'''] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roberta'''] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_roberta'''] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roberta'''] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
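
# Illustrative effect of the lazy module: after `import transformers`, submodules such as
# modeling_roberta are only imported on first access, so
# `from transformers.models.roberta import RobertaModel` triggers the torch-backed import,
# while merely importing the package stays cheap.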
| 319
|
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """
    Determine whether a two-dimensional polygon can exist with the given
    side lengths (the longest side must be shorter than the sum of the rest).
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
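
# Quick checks (illustrative):
# check_polygon([6, 10, 5])   # True, since 10 < 6 + 5
# check_polygon([3, 7, 13])   # False, since 13 >= 3 + 7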
| 319
| 1
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
| 360
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
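
# Usage sketch (illustrative; downloads the pretrained sentencepiece model):
# tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
# tokenizer.tokenize("Hello, world!")   # sentencepiece pieces such as ['▁Hello', ',', '▁world', '!']
# Note the "left" padding_side and the trailing <sep> <cls> layout produced by
# build_inputs_with_special_tokens().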
| 296
| 0
|