| column | type | values |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
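# A minimal replacement sketch (illustrative, not part of this shim; the checkpoint
# id and file names below are assumptions):
#
#     import torch
#     from diffusers import StableDiffusionImg2ImgPipeline
#     from PIL import Image
#
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
#     ).to("cuda")
#     init_image = Image.open("input.png").convert("RGB")
#     image = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75).images[0]
#     image.save("output.png")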
| 73
|
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow

if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 215
| 0
|
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename

URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 352
|
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
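# Illustrative usage, added for clarity (the plate area and separation below are
# made-up values, not from the original file): solving for the force with
# area = 4 m^2 and distance = 0.05 m gives roughly 8.3e-22 N.
if __name__ == "__main__":
    print(casimir_force(force=0, area=4, distance=0.05))  # ~ {'force': 8.3e-22}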
| 148
| 0
|
"""simple docstring"""
def _lowerCAmelCase ( ):
UpperCAmelCase = []
UpperCAmelCase = 1
while len(lowercase_ ) < 1e6:
constant.append(str(lowercase_ ) )
i += 1
UpperCAmelCase = ''.join(lowercase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution())
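# Sanity check, added for reference: the digits of the Champernowne constant at
# positions 1, 10, 100, 1_000, 10_000, 100_000 and 1_000_000 are 1, 1, 5, 3, 7, 2
# and 1, so the product is 210.
if __name__ == "__main__":
    assert solution() == 1 * 1 * 5 * 3 * 7 * 2 * 1  # == 210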
| 78
|
import os


def solution(filename: str = "input.txt") -> int:
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    columns = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(columns)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, columns):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 205
| 0
|
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 39
|
def solution(n: int = 100) -> int:
    # (sum of 1..n)**2 minus the sum of squares 1**2 + ... + n**2, in closed form
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 39
| 1
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 91
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 32
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 365
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 86
| 0
|
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """simple docstring"""
    # the sum of moments about the origin must be (approximately) zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 192
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    '''simple docstring'''

    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    '''simple docstring'''

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    '''simple docstring'''

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 192
| 1
|
'''simple docstring'''
import os
import time

import numpy as np
import onnxruntime as ort

os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"  # enable TensorRT INT8 precision
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"  # use the provided calibration table
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"  # cache built TensorRT engines

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 353
|
'''simple docstring'''


def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """simple docstring"""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 274
| 0
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A = spec.loader.load_module()
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def UpperCamelCase__ ( ):
snake_case : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case : Tuple = False
# source code of `config_class`
snake_case : Tuple = inspect.getsource(lowercase__ )
snake_case : Optional[int] = _re_checkpoint.findall(lowercase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case , snake_case : str = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case : Optional[int] = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case : Any = True
break
snake_case : Optional[Any] = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowercase__ )
if len(lowercase__ ) > 0:
snake_case : Optional[Any] = "\n".join(sorted(lowercase__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 148
|
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCamelCase__ ( lowerCamelCase_ ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case , snake_case : Optional[Any] = {}, {}
if padding is not None:
snake_case : Optional[Any] = padding
if truncation is not None:
snake_case : Union[str, Any] = truncation
if top_k is not None:
snake_case : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : Tuple = {"image": image, "question": question}
else:
snake_case : List[str] = image
snake_case : Optional[int] = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
snake_case : List[Any] = load_image(inputs["image"] )
snake_case : Tuple = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
snake_case : Optional[int] = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(SCREAMING_SNAKE_CASE )
return model_inputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Optional[Any] = self.model(**SCREAMING_SNAKE_CASE )
return model_outputs
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
snake_case : List[Any] = self.model.config.num_labels
if self.framework == "pt":
snake_case : Optional[int] = model_outputs.logits.sigmoid()[0]
snake_case , snake_case : Any = probs.topk(SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
snake_case : Optional[Any] = scores.tolist()
snake_case : List[Any] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
| 148
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowercase ( A ):
'''simple docstring'''
_A : Dict = VOCAB_FILES_NAMES
_A : Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_A : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowercase ( A ):
'''simple docstring'''
_A : int = VOCAB_FILES_NAMES
_A : Optional[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_A : int = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowercase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowercase = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(A )
class __lowercase :
'''simple docstring'''
def __call__( self : Optional[int] , _a : Any , _a : Optional[str] = None , _a : Optional[str] = None , _a : Union[bool, str] = False , _a : Union[bool, str] = False , _a : Optional[int] = None , _a : Optional[Union[str, TensorType]] = None , _a : Optional[bool] = None , **_a : List[Any] , ):
if titles is None and texts is None:
return super().__call__(
_a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
elif titles is None or texts is None:
UpperCamelCase__ = titles if texts is None else texts
return super().__call__(
_a , _a , padding=_a , truncation=_a , max_length=_a , return_tensors=_a , return_attention_mask=_a , **_a , )
UpperCamelCase__ = titles if not isinstance(_a , _a ) else [titles]
UpperCamelCase__ = texts if not isinstance(_a , _a ) else [texts]
UpperCamelCase__ = len(_a )
UpperCamelCase__ = questions if not isinstance(_a , _a ) else [questions] * n_passages
if len(_a ) != len(_a ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_a )} titles and {len(_a )} texts.""" )
UpperCamelCase__ = super().__call__(_a , _a , padding=_a , truncation=_a )['''input_ids''']
UpperCamelCase__ = super().__call__(_a , add_special_tokens=_a , padding=_a , truncation=_a )['''input_ids''']
UpperCamelCase__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a , _a )
]
}
if return_attention_mask is not False:
UpperCamelCase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCamelCase__ = attention_mask
return self.pad(_a , padding=_a , max_length=_a , return_tensors=_a )
def A_ ( self : Union[str, Any] , _a : BatchEncoding , _a : DPRReaderOutput , _a : int = 16 , _a : int = 64 , _a : int = 4 , ):
UpperCamelCase__ = reader_input['''input_ids''']
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = reader_output[:3]
UpperCamelCase__ = len(_a )
UpperCamelCase__ = sorted(range(_a ) , reverse=_a , key=relevance_logits.__getitem__ )
UpperCamelCase__ = []
for doc_id in sorted_docs:
UpperCamelCase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCamelCase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCamelCase__ = sequence_ids.index(self.pad_token_id )
else:
UpperCamelCase__ = len(_a )
UpperCamelCase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_a , top_spans=_a , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_a , start_index=_a , end_index=_a , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def A_ ( self : Optional[int] , _a : List[int] , _a : List[int] , _a : int , _a : int , ):
UpperCamelCase__ = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCamelCase__ = sorted(scores , key=lambda x : x[1] , reverse=True )
UpperCamelCase__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
UpperCamelCase__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A )
class __lowercase ( A, A ):
'''simple docstring'''
_A : List[str] = VOCAB_FILES_NAMES
_A : int = READER_PRETRAINED_VOCAB_FILES_MAP
_A : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : List[Any] = READER_PRETRAINED_INIT_CONFIGURATION
_A : Union[str, Any] = ['''input_ids''', '''attention_mask''']
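The span selection in `_get_best_spans` above reduces to: score every (start, end) pair within `max_answer_length` as `start_logit + end_logit`, sort descending, and greedily keep spans that do not nest inside an already chosen one. A self-contained sketch of that logic with toy logits (all names and values here are illustrative, not the tokenizer's API):
```
def best_spans(start_logits, end_logits, max_answer_length, top_spans):
    # Score every candidate span as start_logit + end_logit.
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        # Skip spans nested inside (or containing) a span we already kept.
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], max_answer_length=2, top_spans=2))
# [(1, 2), (0, 0)]
```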
| 35
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowercase = get_logger(__name__)
class __lowercase :
'''simple docstring'''
def __init__( self : Dict , _a : Optional[str] = None ):
UpperCamelCase__ = (
os.path.join(_a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
UpperCamelCase__ = Extractor
def A_ ( self : str , _a : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
UpperCamelCase__ = os.path.abspath(_a )
return os.path.join(self.extract_dir , hash_url_to_filename(_a ) )
def A_ ( self : Optional[Any] , _a : str , _a : bool ):
return force_extract or (
not os.path.isfile(_a ) and not (os.path.isdir(_a ) and os.listdir(_a ))
)
def A_ ( self : int , _a : str , _a : bool = False ):
UpperCamelCase__ = self.extractor.infer_extractor_format(_a )
if not extractor_format:
return input_path
UpperCamelCase__ = self._get_output_path(_a )
if self._do_extract(_a , _a ):
self.extractor.extract(_a , _a , _a )
return output_path
class __lowercase ( A ):
'''simple docstring'''
@classmethod
@abstractmethod
def A_ ( cls : List[Any] , _a : Union[Path, str] , **_a : List[str] ):
...
@staticmethod
@abstractmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
...
class __lowercase ( A, A ):
'''simple docstring'''
_A : List[bytes] = []
@staticmethod
def A_ ( _a : Union[Path, str] , _a : int ):
with open(_a , '''rb''' ) as f:
return f.read(_a )
@classmethod
def A_ ( cls : str , _a : Union[Path, str] , _a : bytes = b"" ):
if not magic_number:
UpperCamelCase__ = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
UpperCamelCase__ = cls.read_magic_number(_a , _a )
except OSError:
return False
return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
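The mixin above drives format detection purely from a file's leading magic bytes. A compact sketch of the same idea outside the class hierarchy (the magic numbers are the ones used by the extractors above; the file path is hypothetical):
```
# Sniff an archive format from its first few bytes.
MAGIC_NUMBERS = {
    "gzip": [b"\x1f\x8b"],
    "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    "bz2": [b"\x42\x5a\x68"],
}

def sniff_format(path):
    # Read just enough bytes to cover the longest known magic number.
    max_len = max(len(m) for magics in MAGIC_NUMBERS.values() for m in magics)
    with open(path, "rb") as f:
        header = f.read(max_len)
    for fmt, magics in MAGIC_NUMBERS.items():
        if any(header.startswith(m) for m in magics):
            return fmt
    return None

print(sniff_format("archive.tar.gz"))  # "gzip", assuming the file exists and is gzip-compressed
```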
class __lowercase ( A ):
'''simple docstring'''
@classmethod
def A_ ( cls : Union[str, Any] , _a : Union[Path, str] , **_a : Any ):
return tarfile.is_tarfile(_a )
@staticmethod
def A_ ( _a : int , _a : List[str] ):
def resolved(_a : str ) -> str:
return os.path.realpath(os.path.abspath(_a ) )
def badpath(_a : str , _a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_a , _a ) ).startswith(_a )
def badlink(_a : Tuple , _a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
UpperCamelCase__ = resolved(os.path.join(_a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_a )
UpperCamelCase__ = resolved(_a )
for finfo in members:
if badpath(finfo.name , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(_a , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(_a , _a ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
os.makedirs(_a , exist_ok=_a )
UpperCamelCase__ = tarfile.open(_a )
tar_file.extractall(_a , members=TarExtractor.safemembers(_a , _a ) )
tar_file.close()
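`safemembers` above exists to block path traversal: a tar member named `../../etc/passwd`, or a symlink/hardlink that resolves outside the destination, is filtered out before extraction. A minimal sketch of the core path check (paths are illustrative):
```
import os

def escapes_base(base, member_name):
    # True when the member would be written outside `base` (e.g. "../../etc/passwd").
    base = os.path.realpath(os.path.abspath(base))
    target = os.path.realpath(os.path.abspath(os.path.join(base, member_name)))
    return not target.startswith(base)

print(escapes_base("/tmp/out", "data/train.csv"))    # False: safe
print(escapes_base("/tmp/out", "../../etc/passwd"))  # True: blocked
```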
class __lowercase ( A ):
'''simple docstring'''
_A : int = [b'''\x1F\x8B''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with gzip.open(_a , '''rb''' ) as gzip_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : int = [
b'''PK\x03\x04''',
b'''PK\x05\x06''', # empty archive
b'''PK\x07\x08''', # spanned archive
]
@classmethod
def A_ ( cls : Dict , _a : Union[Path, str] , _a : bytes = b"" ):
if super().is_extractable(_a , magic_number=_a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_a , '''rb''' ) as fp:
UpperCamelCase__ = _EndRecData(_a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
data = fp.read(sizeCentralDir )  # CD is where we expect it to be
if len(data ) == sizeCentralDir:
centdir = struct.unpack(structCentralDir , data )  # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
os.makedirs(_a , exist_ok=_a )
with zipfile.ZipFile(_a , '''r''' ) as zip_file:
zip_file.extractall(_a )
zip_file.close()
class __lowercase ( A ):
'''simple docstring'''
_A : Tuple = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with lzma.open(_a ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : Union[str, Any] = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(_a , exist_ok=_a )
UpperCamelCase__ = rarfile.RarFile(_a )
rf.extractall(_a )
rf.close()
class __lowercase ( A ):
'''simple docstring'''
_A : Optional[Any] = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
UpperCamelCase__ = zstd.ZstdDecompressor()
with open(_a , '''rb''' ) as ifh, open(_a , '''wb''' ) as ofh:
dctx.copy_stream(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : Any = [b'''\x42\x5A\x68''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
with bz2.open(_a , '''rb''' ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase ( A ):
'''simple docstring'''
_A : Optional[int] = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import py7zr
os.makedirs(_a , exist_ok=_a )
with py7zr.SevenZipFile(_a , '''r''' ) as archive:
archive.extractall(_a )
class __lowercase ( A ):
'''simple docstring'''
_A : Union[str, Any] = [b'''\x04\x22\x4D\x18''']
@staticmethod
def A_ ( _a : Union[Path, str] , _a : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lz4.frame
with lz4.frame.open(_a , '''rb''' ) as compressed_file:
with open(_a , '''wb''' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class __lowercase :
'''simple docstring'''
_A : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A_ ( cls : Dict ):
return max(
len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(extractor , MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A_ ( _a : Union[Path, str] , _a : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(_a , magic_number_length=_a )
except OSError:
return b""
@classmethod
def A_ ( cls : Optional[Any] , _a : Union[Path, str] , _a : bool = False ):
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=_a , )
UpperCamelCase__ = cls.infer_extractor_format(_a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A_ ( cls : str , _a : Union[Path, str] ): # <Added version="2.4.0"/>
UpperCamelCase__ = cls._get_magic_number_max_length()
UpperCamelCase__ = cls._read_magic_number(_a , _a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_a , magic_number=_a ):
return extractor_format
@classmethod
def A_ ( cls : List[Any] , _a : Union[Path, str] , _a : Union[Path, str] , _a : Optional[str] = None , _a : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(_a ) , exist_ok=_a )
# Prevent parallel extractions
UpperCamelCase__ = str(Path(_a ).with_suffix('''.lock''' ) )
with FileLock(_a ):
shutil.rmtree(_a , ignore_errors=_a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_a , _a ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=_a , )
UpperCamelCase__ = extractor if extractor != '''deprecated''' else extractor_format
else:
UpperCamelCase__ = cls.extractors[extractor_format]
return extractor.extract(_a , _a )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=_a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_a ):
return extractor.extract(_a , _a )
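Putting the pieces together, the intended flow is: infer the format once from the magic bytes, then dispatch to the matching extractor. A usage sketch, written against the upstream class name `Extractor` and with hypothetical paths:
```
# Hypothetical usage of the flow above (paths are illustrative).
archive = "downloads/data.tar.gz"
output_dir = "extracted/data"

fmt = Extractor.infer_extractor_format(archive)  # e.g. "gzip", or None if unrecognized
if fmt is not None:
    Extractor.extract(archive, output_dir, extractor_format=fmt)
```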
| 35
| 1
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = "AutoTokenizer"
UpperCamelCase__ = ["tokenizer"]
UpperCamelCase__ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ):
"""simple docstring"""
super().__init__(UpperCAmelCase )
_UpperCAmelCase = speaker_embeddings
@classmethod
def UpperCamelCase ( cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
_UpperCAmelCase = get_file_from_repo(
UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_UpperCAmelCase = None
else:
with open(UpperCAmelCase ) as speaker_embeddings_json:
_UpperCAmelCase = json.load(UpperCAmelCase )
else:
_UpperCAmelCase = None
_UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , 'v2' ) , exist_ok=UpperCAmelCase )
_UpperCAmelCase = {}
_UpperCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
_UpperCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , UpperCAmelCase , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
_UpperCAmelCase = os.path.join(UpperCAmelCase , F"""{prompt_key}_{key}.npy""" )
_UpperCAmelCase = tmp_dict
with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase = None , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.speaker_embeddings[voice_preset]
_UpperCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_UpperCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , UpperCAmelCase ) , cache_dir=kwargs.pop('cache_dir' , UpperCAmelCase ) , force_download=kwargs.pop('force_download' , UpperCAmelCase ) , proxies=kwargs.pop('proxies' , UpperCAmelCase ) , resume_download=kwargs.pop('resume_download' , UpperCAmelCase ) , local_files_only=kwargs.pop('local_files_only' , UpperCAmelCase ) , use_auth_token=kwargs.pop('use_auth_token' , UpperCAmelCase ) , revision=kwargs.pop('revision' , UpperCAmelCase ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_UpperCAmelCase = np.load(UpperCAmelCase )
return voice_preset_dict
def UpperCamelCase ( self , UpperCAmelCase = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=256 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCAmelCase = self._load_voice_preset(UpperCAmelCase )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('.npz' ):
_UpperCAmelCase = voice_preset + '.npz'
_UpperCAmelCase = np.load(UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
_UpperCAmelCase = self.tokenizer(
UpperCAmelCase , return_tensors=UpperCAmelCase , padding='max_length' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
if voice_preset is not None:
_UpperCAmelCase = voice_preset
return encoded_text
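A hypothetical call pattern for the processor above, using the upstream class name `BarkProcessor` for readability; the checkpoint id and voice preset name are illustrative and require network access:
```
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
print(inputs["input_ids"].shape)  # tokenized text, padded to max_length
print(sorted(inputs.keys()))      # includes the voice preset arrays when a preset is set
```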
| 39
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=10 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModel(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFResNetForImageClassification(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
| 39
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Dict = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = 'git_vision_model'
def __init__(self , lowercase=768 , lowercase=3072 , lowercase=12 , lowercase=12 , lowercase=3 , lowercase=224 , lowercase=16 , lowercase="quick_gelu" , lowercase=1E-5 , lowercase=0.0 , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : int = hidden_size
A_ : str = intermediate_size
A_ : int = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : Any = num_channels
A_ : List[Any] = patch_size
A_ : List[Any] = image_size
A_ : List[str] = initializer_range
A_ : List[str] = attention_dropout
A_ : int = layer_norm_eps
A_ : str = hidden_act
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_ : Dict = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'git'
def __init__(self , lowercase=None , lowercase=30522 , lowercase=768 , lowercase=6 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1024 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase="absolute" , lowercase=True , lowercase=False , lowercase=101 , lowercase=102 , lowercase=None , **lowercase , ):
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , pad_token_id=lowercase , **lowercase )
if vision_config is None:
A_ : Optional[Any] = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
A_ : List[str] = GitVisionConfig(**lowercase )
A_ : str = vocab_size
A_ : Dict = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Dict = hidden_act
A_ : List[str] = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Tuple = max_position_embeddings
A_ : List[Any] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = position_embedding_type
A_ : int = use_cache
A_ : int = tie_word_embeddings
A_ : List[str] = num_image_with_embedding
A_ : Tuple = bos_token_id
A_ : Tuple = eos_token_id
def _a (self ):
A_ : Tuple = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.vision_config.to_dict()
A_ : Dict = self.__class__.model_type
return output
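A short composition sketch for the two configs above, using the upstream names `GitVisionConfig` and `GitConfig`; the overridden values are arbitrary:
```
from transformers import GitConfig, GitVisionConfig

# Build a vision config, embed it in the full GIT config, and round-trip to a dict.
vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
config = GitConfig(vision_config=vision_config.to_dict(), hidden_size=768)
print(config.to_dict()["vision_config"]["hidden_size"])  # 768
```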
| 352
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , lowercase=2 , ):
A_ : List[str] = parent
A_ : str = batch_size
A_ : Optional[Any] = image_size
A_ : List[str] = patch_size
A_ : List[str] = num_channels
A_ : List[str] = is_training
A_ : str = use_labels
A_ : List[str] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[int] = type_sequence_label_size
A_ : Any = initializer_range
A_ : int = scope
A_ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Dict = (image_size // patch_size) ** 2
A_ : List[str] = num_patches + 1
def _a (self ):
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Optional[Any] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = ViTModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : List[str] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = ViTForMaskedImageModeling(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : Any = ViTForMaskedImageModeling(lowercase )
model.to(lowercase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[int] = model(lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Dict = self.type_sequence_label_size
A_ : str = ViTForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : List[str] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Any = 1
A_ : str = ViTForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
A_ : str = self.prepare_config_and_inputs()
A_ , A_ , A_ = config_and_inputs
A_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def _a (self ):
A_ : Any = ViTModelTester(self )
A_ : str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )
def _a (self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = ViTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a ( ):
'''simple docstring'''
A_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _a (self ):
A_ : Optional[int] = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowercase )
A_ : List[str] = self.default_image_processor
A_ : Tuple = prepare_img()
A_ : int = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : str = model(**lowercase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def _a (self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
A_ : Optional[int] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowercase )
A_ : List[Any] = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
A_ : Dict = prepare_img()
A_ : str = image_processor(images=lowercase , return_tensors="""pt""" )
A_ : int = inputs.pixel_values.to(lowercase )
# forward pass
with torch.no_grad():
A_ : int = model(lowercase , interpolate_pos_encoding=lowercase )
# verify the logits
A_ : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowercase )
A_ : List[Any] = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _a (self ):
A_ : List[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : Any = prepare_img()
A_ : List[str] = image_processor(images=lowercase , return_tensors="""pt""" )
A_ : Any = inputs.pixel_values.to(lowercase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : Optional[Any] = model(lowercase )
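The `interpolate_pos_encoding` test above relies on resizing the learned 2D position grid. A sketch of that interpolation for the DINO ViT-S/8 numbers used in the test (224px training resolution, patch size 8, hidden size 384, evaluated at 480px); the tensors here are random stand-ins for the learned embeddings:
```
import torch
import torch.nn.functional as F

# [CLS] token + a 28x28 patch grid (224 / 8) with hidden size 384.
pos_embed = torch.randn(1, 1 + 28 * 28, 384)
cls_tok, grid = pos_embed[:, :1], pos_embed[:, 1:]

# Reshape to a 2D grid, resize bicubically to 60x60 (480 / 8), flatten back.
grid = grid.reshape(1, 28, 28, 384).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=(60, 60), mode="bicubic", align_corners=False)
grid = grid.permute(0, 2, 3, 1).reshape(1, 60 * 60, 384)

new_pos_embed = torch.cat([cls_tok, grid], dim=1)
print(new_pos_embed.shape)  # torch.Size([1, 3601, 384]), matching the test above
```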
| 135
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # silence TensorFlow C++ logging
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 13
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
| 86
| 0
|
'''simple docstring'''
def nor_gate(input_1 : int , input_2 : int ):
"""simple docstring"""
return int(input_1 == input_2 == 0 )
def main():
"""simple docstring"""
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 366
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _A :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : str = is_training
__UpperCAmelCase : Union[str, Any] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Dict = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : List[Any] = scope
    def prepare_config_and_inputs(self) -> Tuple:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self) -> Optional[Any]:
        '''simple docstring'''
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> List[Any]:
        '''simple docstring'''
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> Optional[int]:
        '''simple docstring'''
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> Any:
        '''simple docstring'''
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> Optional[int]:
        '''simple docstring'''
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3))
    def prepare_config_and_inputs_for_common(self) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self) -> Tuple:
        '''simple docstring'''
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model(self) -> Any:
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
    def test_model_various_embeddings(self) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self) -> List[str]:
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label(self) -> List[Any]:
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label(self) -> Any:
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self) -> Dict:
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type) -> Tuple:
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
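    # Note (annotation): "linear" RoPE scaling rescales positions for every input,
    # so even short sequences diverge from the unscaled model, while "dynamic"
    # NTK-style scaling leaves sequences within the original context length
    # untouched -- which is exactly what the two branches above assert.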
@require_torch
class _A ( unittest.TestCase ):
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
    def test_model_7b_logits(self) -> Any:
        '''simple docstring'''
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
    def test_model_13b_logits(self) -> Optional[Any]:
        '''simple docstring'''
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
    def test_model_13bf_logits(self) -> Dict:
        '''simple docstring'''
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
    def test_model_70b_logits(self) -> Union[str, Any]:
        '''simple docstring'''
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
@unittest.skip("""Model is curently gated""" )
@slow
    def test_model_13b_greedy_generation(self) -> Optional[int]:
        '''simple docstring'''
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 16
| 0
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs) -> None:
    """print under an exclusive flock so multi-process output doesn't interleave"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
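# Note (annotation): the exclusive flock above acts as a cross-process mutex, so
# lines printed by different ranks land in the shared log without interleaving.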
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 5
|
from math import ceil
def solution(n: int = 1_0_0_1) -> int:
    """simple docstring"""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
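# Worked check (my annotation): ring i of the spiral has side length 2*i + 1 and
# its four corners are (2*i + 1)**2 - k * (2 * i) for k = 0..3, which sum to
# 4 * odd**2 - 6 * even as used above. For n = 5 the diagonal sum is
# 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101.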
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 274
| 0
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
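# Note (annotation): for every denominator d <= limit this takes the largest
# numerator p with p/d < numerator/denominator and keeps the fraction closest
# to the target; with the defaults it yields 428570, the numerator of the
# fraction immediately left of 3/7 (Project Euler 71).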
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 129
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
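    # Note (annotation): each sample is a sliding window of `look_back` scaled
    # prices and its target is the following `forward_days` prices, i.e. a
    # many-to-many sequence forecasting setup.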
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
    result = model.predict(x_test)
| 129
| 1
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """simple docstring"""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """simple docstring"""
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 35
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
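# Note (annotation): subtracting the row-wise max before exponentiating avoids
# overflow in np.exp; the result is unchanged because softmax is invariant to
# adding a constant to every logit.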
class PairClassificationPipeline(Pipeline):
    """simple docstring"""
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 35
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class __A ( unittest.TestCase ):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
@cached_property
    def tokenizer(self):
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
@slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 350
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(expected_tokens, tokens_without_spe_char_p)
                self.assertListEqual(expected_tokens, tokens_without_spe_char_r)
| 106
| 0
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations", (
pytest.param(_add_items, id="add items" ),
pytest.param(_overwrite_items, id="overwrite items" ),
pytest.param(_delete_items, id="delete items" ),
pytest.param(_access_absent_items, id="access absent items" ),
pytest.param(_add_with_resize_up, id="add with resize up" ),
pytest.param(_add_with_resize_down, id="add with resize down" ),
), )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
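# Note (annotation): the parametrized test above replays the same operation
# sequence against HashMap and a built-in dict and asserts they stay
# observationally identical (results, repr, keys, length, items).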
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
assert dict_public_names > hash_public_names
| 145
|
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    '''simple docstring'''
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
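# Note (annotation): the DP table costs O(m * n) time and space; the traceback
# then reconstructs one witness subsequence, preferring moves up the table when
# the lengths tie.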
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 135
| 0
|
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result
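# Note (annotation): the optimized variant computes only the first half of each
# row and mirrors it, relying on the symmetry C(n, k) == C(n, n - k).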
def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup="""import __main__""")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 62
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 62
| 1
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
"""simple docstring"""
    def __init__(self, *, # begin keyword-only arguments
        bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices
    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        return len(self.symbols)
    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d: dict) -> dict:
    '''simple docstring'''
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    '''simple docstring'''
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(F"""Writing results to {pytorch_dump_folder_path}""")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(F"""path to the file {dict_file} does not exist!""")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1E-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(F"""Generating {biogpt_model_config_file}""")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(F"""Generating {biogpt_tokenizer_config_file}""")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(F"""Generating {pytorch_weights_dump_path}""")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 17
|
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0  # reset for the next name
    return total_score
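# Quick illustrative check of the scoring rule above (A=1, ..., Z=26): the problem
# statement's example name "COLIN" has letter value 3 + 15 + 12 + 9 + 14 = 53.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53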
if __name__ == "__main__":
print(solution())
| 16
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)"
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
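# Illustrative check (added): shard filenames are expected to look like
# "<prefix>-<shard_index>-<num_samples>.tfrecord", so the regex above recovers the sample count.
assert re.search(r"-\d+-(\d+)\.tfrecord", "wikitext-00001-5000.tfrecord").group(1) == "5000"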
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)  # use the argument rather than the global args
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )
    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
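# Hedged example invocation (added; the script name and GCS paths are illustrative):
#   python run_mlm.py --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/model --tpu_name local --bfloat16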
| 369
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            # timm stores q, k and v as a single fused projection; split it into the three HF weights
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
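# Hedged example invocation (added; the script name and output path are illustrative):
#   python convert_swin_timm_to_pytorch.py --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224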
| 85
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
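# Hedged usage sketch (added; requires Pillow, downloads the Donut checkpoint on first use,
# and the local image path is illustrative):
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")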
| 129
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: rows of 1..n growing stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: rows of n..1 shrinking stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive n."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
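# Illustrative expected output (added): pretty_print(3) draws the growing upper half via
# floyd() and the mirrored lower half via reverse_floyd(), roughly:
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *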
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print('Good Bye...')
| 129
| 1
|
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box is (x0, y0, x1, y1), so swap corners
        # where needed to enforce x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # flag names below are restored from the upstream LayoutLMv3 test file
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
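    # Input-format note (added, mirroring the integration test above): LayoutLMv3 takes one
    # (x0, y0, x1, y1) box per text token on a 0-1000 normalized page grid, e.g. for two tokens:
    #   input_ids = torch.tensor([[1, 2]])
    #   bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)  # shape (1, 2, 4)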
| 359
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """PyTorch dataset wrapping SQuAD features, with on-disk caching of the featurized examples."""

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert the featurized example to tensors and build the model inputs
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
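# Hedged usage sketch (added; data paths and checkpoint name are illustrative):
#   from transformers import AutoTokenizer
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict of input_ids / attention_mask / token_type_ids (+ positions in train mode)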
| 6
| 0
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """Copy/paste/tweak the fairseq XLM-RoBERTa-XL weights into our BERT-style structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq's feed-forward projections are fc1 and fc2)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
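# Hedged example invocation (added; the script name and paths are illustrative):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq_xlmr_xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-hf \
#       --classification_head   # only when converting an MNLI classification checkpoint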
| 106
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """CLIP-style image processor (the concrete class name is inferred from the OPENAI_CLIP defaults below)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
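# Hedged usage sketch (added; the class name is inferred from the OPENAI_CLIP defaults above
# and the image path is illustrative):
#   import PIL.Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=PIL.Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) after shortest-edge resize + center crop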
| 106
| 1
|
from __future__ import annotations
from typing import Any
class Matrix:
    """A naive dense matrix with just the operations needed for the Sherman-Morrison demo below."""
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
def __sub__( self : Dict , UpperCAmelCase_ : Matrix ):
return self + (-another)
def __mul__( self : List[str] , UpperCAmelCase_ : int | float | Matrix ):
if isinstance(UpperCAmelCase_ , (int, float) ): # Scalar multiplication
SCREAMING_SNAKE_CASE : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE : int = self[r, c] * another
return result
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # Matrix multiplication
assert self.column == another.row
SCREAMING_SNAKE_CASE : List[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = f'''Unsupported type given for another ({type(UpperCAmelCase_ )})'''
raise TypeError(UpperCAmelCase_ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[str] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE : str = self[r, c]
return result
def _A ( self : Any , UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
SCREAMING_SNAKE_CASE : Union[str, Any] = v.transpose()
SCREAMING_SNAKE_CASE : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = Matrix(3 , 3 , 0 )
for i in range(3 ):
SCREAMING_SNAKE_CASE : int = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
SCREAMING_SNAKE_CASE : str = Matrix(3 , 1 , 0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = 1, 2, -3
SCREAMING_SNAKE_CASE : str = Matrix(3 , 1 , 0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase , lowercase )}''' )
def lowerCamelCase__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
testa()
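    # Added sketch: numerically check the Sherman-Morrison identity for A = I;
    # (A + u v^T) multiplied by the returned inverse should print ~identity.
    def test3() -> None:
        a = Matrix(3, 3, 0)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            a[i, i] = 1
            ainv[i, i] = 1
        u = Matrix(3, 1, 0)
        v = Matrix(3, 1, 0)
        u[0, 0], v[0, 0] = 1, 2
        print((a + u * v.transpose()) * ainv.sherman_morrison(u, v))

    test3()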
| 319
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: after each pass the largest remaining element
    has bubbled to position ``length - 1``, so recurse on the shorter prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
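    # Added example: quick checks beyond the doctests; the sort is in-place
    # and also returned, so either style of use works. Note the `swapped`
    # early exit makes an already-sorted list a single O(n) pass.
    assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
    assert bubble_sort([-2, -45, -5]) == [-45, -5, -2]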
| 319
| 1
|
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
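# Example usage (added sketch): `attribute_map` above aliases `hidden_size` to
# `d_model`, so both spellings read the same value.
#
#     config = MvpConfig(use_prompt=True, prompt_length=128)
#     assert config.hidden_size == config.d_model == 1024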
| 62
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
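# Example usage (added sketch; the class name `CLIPImageProcessor` is inferred
# from the OPENAI_CLIP defaults above). Assumes PIL and torch are installed:
#
#     from PIL import Image
#     import numpy as np
#     processor = CLIPImageProcessor()
#     image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])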
| 62
| 1
|
"""simple docstring"""
import functools
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not all(isinstance(_snake_case , _snake_case ) for day in days ):
raise ValueError("""The parameter days should be a list of integers""" )
if len(_snake_case ) != 3 or not all(isinstance(_snake_case , _snake_case ) for cost in costs ):
raise ValueError("""The parameter costs should be a list of three integers""" )
if len(_snake_case ) == 0:
return 0
if min(_snake_case ) <= 0:
raise ValueError("""All days elements should be greater than 0""" )
if max(_snake_case ) >= 366:
raise ValueError("""All days elements should be less than 366""" )
UpperCAmelCase = set(_snake_case )
@functools.cache
def dynamic_programming(_snake_case ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
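    # Worked example (added): days [1, 4, 6, 7, 8, 20] with costs [2, 7, 15]:
    # a 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass
    # on day 20 give the minimum total of 2 + 7 + 2 = 11.
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11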
| 234
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] ,)
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] ,)
| 234
| 1
|
"""simple docstring"""
import requests
_a = '' # <-- Put your OpenWeatherMap appid here!
_a = 'https://api.openweathermap.org/data/2.5/'
def __a ( __lowerCamelCase = "Chicago", __lowerCamelCase = APPID ):
return requests.get(URL_BASE + "weather", params=locals() ).json()
def __a ( __lowerCamelCase = "Kolkata, India", __lowerCamelCase = APPID ):
return requests.get(URL_BASE + "forecast", params=locals() ).json()
def __a ( __lowerCamelCase = 55.68, __lowerCamelCase = 12.57, __lowerCamelCase = APPID ):
return requests.get(URL_BASE + "onecall", params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_a = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
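# Note (added): `params=locals()` works because each function's arguments are
# exactly the query parameters the endpoint expects. Offline sketch of the
# resulting URL (nothing is sent; "KEY" is a placeholder appid):
#
#     req = requests.Request("GET", URL_BASE + "weather", params={"q": "Chicago", "appid": "KEY"})
#     print(req.prepare().url)
#     # https://api.openweathermap.org/data/2.5/weather?q=Chicago&appid=KEY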
| 61
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Legacy dict arguments are dropped if present.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
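# Example usage (added sketch): composing the full config from sub-configs,
# mirroring `from_text_vision_configs` above.
#
#     text_config = BridgeTowerTextConfig(vocab_size=50265)
#     vision_config = BridgeTowerVisionConfig(image_size=288)
#     config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.to_dict()["text_config"]["vocab_size"] == 50265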
| 85
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 292
|
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
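# Note (added): the alphabetical-order check relies on Python comparing lists of
# strings lexicographically, e.g.:
#
#     paths = ["src/a.py", "src/c.py", "src/b.py"]
#     paths != sorted(paths)  # True -> the script raises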
| 292
| 1
|
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value in the tree with a recursive depth-first traversal."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
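    # Added example: 10 + 5 + 8 + 3 + 4 = 30, and iterating yields that one sum.
    tree = Node(10)
    tree.left = Node(5)
    tree.right = Node(8)
    tree.left.left = Node(3)
    tree.left.right = Node(4)
    print(next(iter(BinaryTreeNodeSum(tree))))  # 30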
| 10
|
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
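    # Sanity check (added): the divide-and-conquer result should match an
    # O(n^2) brute force over all contiguous subarrays of a small random input.
    data = [randint(-10, 10) for _ in range(12)]
    brute = max(
        sum(data[i : j + 1]) for i in range(len(data)) for j in range(i, len(data))
    )
    assert max_subarray(data, 0, len(data) - 1)[2] == brute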
| 6
| 0
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
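    # Added check: 4150 equals the sum of the fifth powers of its digits
    # (4^5 + 1^5 + 5^5 + 0^5 = 1024 + 1 + 3125 + 0 = 4150).
    assert digits_fifth_powers_sum(4150) == 4150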
| 360
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
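# Example usage (added sketch): the standard denoising-loop shape for this
# scheduler -- `model` here is a hypothetical stand-in for any UNet-like
# noise predictor, not part of this module.
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)  # predicted noise
#         sample = scheduler.step(model_output, t, sample).prev_sample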
| 138
| 0
|
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
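    # Offline example (added): the same find_all pattern on a static snippet,
    # since the live IMDb markup changes over time and can break this scraper.
    html = (
        '<td class="titleColumn"><a>Movie</a></td>'
        '<td class="ratingColumn imdbRating"><strong>9.2</strong></td>'
    )
    soup = BeautifulSoup(html, "html.parser")
    print(soup.find_all("td", class_="ratingColumn imdbRating")[0].strong.text)  # 9.2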
| 319
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
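# Hedged usage sketch (illustration only; "path/to/dump" stands for whatever
# was passed as --pytorch_dump_folder_path, and this assumes a VQA checkpoint
# was converted):
#
#   from transformers import ViltProcessor, ViltForQuestionAnswering
#   processor = ViltProcessor.from_pretrained("path/to/dump")
#   model = ViltForQuestionAnswering.from_pretrained("path/to/dump")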
| 319
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
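# Background note (added for clarity): _LazyModule defers the torch-dependent
# imports until an attribute is first accessed. A rough sketch of the same
# idea, with hypothetical names:
#
#   import importlib
#
#   class LazyModule:
#       def __init__(self, name, import_structure):
#           self._name = name
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, attrs in self._import_structure.items():
#               if attr in attrs:
#                   return getattr(importlib.import_module(f"{self._name}.{submodule}"), attr)
#           raise AttributeError(attr)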
| 352
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # load GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")

        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
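# Hedged usage sketch (illustration only; the hub id below assumes the
# converted weights were published under that name):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="vinvino02/glpn-kitti")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")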
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys; the cache is sized to n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key x, evicting the least recently used key if the store is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
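# For comparison (illustration only, not part of the original): the standard
# library gives the same least-recently-used eviction policy via
# OrderedDict.move_to_end.
#
#   from collections import OrderedDict
#
#   cache: OrderedDict = OrderedDict()
#   capacity = 4
#
#   def refer(x) -> None:
#       if x in cache:
#           cache.move_to_end(x)
#       elif len(cache) == capacity:
#           cache.popitem(last=False)  # evict the least recently used key
#       cache[x] = None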
| 234
|
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number using binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using binary search to find the positive/negative boundary per row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by iterating through every element in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Like the brute force solution above, but breaks early within each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
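# Worked example (added for illustration): in the row [4, 3, 2, -1] the first
# negative value sits at index 3, so the binary search returns 3; over the
# whole 4x4 test grid there are 8 negatives.
#
# >>> find_negative_index([4, 3, 2, -1])
# 3
# >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
# 8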
| 234
| 1
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
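# Example invocation (illustration only; the script filename is a placeholder):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers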
| 60
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 60
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts: single-character frequencies
    and two-character frequencies.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
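# Worked example (added for illustration): for the two-character text "ab",
# analyze_text counts singles {"a": 1, "b": 1} and pairs {" a": 1, "ab": 1},
# so the first-order entropy is -2 * (1/2) * log2(1/2) = 1.0 bit.
#
# >>> single, double = analyze_text("ab")
# >>> sorted(single.items())
# [('a', 1), ('b', 1)]
# >>> sorted(double.items())
# [(' a', 1), ('ab', 1)]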
| 292
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A = {0: "batch"}
A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A = {0: "batch", 1: "decoder_sequence"}
A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
else:
A = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(__UpperCamelCase , self ).outputs
if self.use_past:
A, A = self.num_layers
for i in range(__UpperCamelCase ):
A = {0: "batch", 2: "past_sequence + sequence"}
A = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
A = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
A = common_inputs["decoder_input_ids"].shape[1]
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A, A = self.num_layers
A = min(__UpperCamelCase , __UpperCamelCase )
A = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
A = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A, A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A = seqlen + 2
A, A = self.num_layers
A, A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs["attention_mask"].dtype
A = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
A = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def lowerCamelCase ( self :Tuple , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
A = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
A = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :PreTrainedTokenizer , __UpperCamelCase :int = -1 , __UpperCamelCase :int = -1 , __UpperCamelCase :bool = False , __UpperCamelCase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
A = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :List[str] , __UpperCamelCase :str , __UpperCamelCase :str ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def lowerCamelCase ( self :List[str] ):
return 1e-4
| 292
| 1
|
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """
        Extract the features of the input(s).
        """
        return super().__call__(*args, **kwargs)
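# Hedged usage sketch (illustration only; the checkpoint name is an example):
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="bert-base-uncased")
#   features = extractor("This is a test.")  # nested list of shape [batch, tokens, hidden]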
| 149
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
"""simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = 5_02_57 , lowerCAmelCase = 10_24 , lowerCAmelCase = 7_68 , lowerCAmelCase = 12 , lowerCAmelCase = 12 , lowerCAmelCase = None , lowerCAmelCase = "gelu_new" , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 0.02 , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = False , lowerCAmelCase = False , ):
"""simple docstring"""
super().__init__()
snake_case = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
snake_case = prefix_inner_dim
snake_case = prefix_hidden_dim
snake_case = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case = (
nn.Linear(self.prefix_hidden_dim , lowerCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case = GPTaConfig(
vocab_size=lowerCAmelCase , n_positions=lowerCAmelCase , n_embd=lowerCAmelCase , n_layer=lowerCAmelCase , n_head=lowerCAmelCase , n_inner=lowerCAmelCase , activation_function=lowerCAmelCase , resid_pdrop=lowerCAmelCase , embd_pdrop=lowerCAmelCase , attn_pdrop=lowerCAmelCase , layer_norm_epsilon=lowerCAmelCase , initializer_range=lowerCAmelCase , scale_attn_weights=lowerCAmelCase , use_cache=lowerCAmelCase , scale_attn_by_inverse_layer_idx=lowerCAmelCase , reorder_and_upcast_attn=lowerCAmelCase , )
snake_case = GPTaLMHeadModel(lowerCAmelCase )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , ):
"""simple docstring"""
snake_case = self.transformer.transformer.wte(lowerCAmelCase )
snake_case = self.encode_prefix(lowerCAmelCase )
snake_case = self.decode_prefix(lowerCAmelCase )
snake_case = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
snake_case = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
snake_case = torch.cat((dummy_token, input_ids) , dim=1 )
snake_case = self.transformer(inputs_embeds=lowerCAmelCase , labels=lowerCAmelCase , attention_mask=lowerCAmelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
return torch.zeros(lowerCAmelCase , self.prefix_length , dtype=torch.intaa , device=lowerCAmelCase )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
return self.encode_prefix(lowerCAmelCase )
@torch.no_grad()
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = torch.split(lowerCAmelCase , 1 , dim=0 )
snake_case = []
snake_case = []
for feature in features:
snake_case = self.decode_prefix(feature.to(lowerCAmelCase ) ) # back to the clip feature
# Only support beam search for now
snake_case ,snake_case = self.generate_beam(
input_embeds=lowerCAmelCase , device=lowerCAmelCase , eos_token_id=lowerCAmelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case = torch.stack(lowerCAmelCase )
snake_case = torch.stack(lowerCAmelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = 5 , lowerCAmelCase = 67 , lowerCAmelCase = 1.0 , lowerCAmelCase = None , ):
"""simple docstring"""
snake_case = eos_token_id
snake_case = None
snake_case = None
snake_case = torch.ones(lowerCAmelCase , device=lowerCAmelCase , dtype=torch.int )
snake_case = torch.zeros(lowerCAmelCase , device=lowerCAmelCase , dtype=torch.bool )
if input_embeds is not None:
snake_case = input_embeds
else:
snake_case = self.transformer.transformer.wte(lowerCAmelCase )
for i in range(lowerCAmelCase ):
snake_case = self.transformer(inputs_embeds=lowerCAmelCase )
snake_case = outputs.logits
snake_case = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case = logits.softmax(-1 ).log()
if scores is None:
snake_case ,snake_case = logits.topk(lowerCAmelCase , -1 )
snake_case = generated.expand(lowerCAmelCase , *generated.shape[1:] )
snake_case ,snake_case = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
snake_case = next_tokens
else:
snake_case = tokens.expand(lowerCAmelCase , *tokens.shape[1:] )
snake_case = torch.cat((tokens, next_tokens) , dim=1 )
else:
snake_case = -float(np.inf )
snake_case = 0
snake_case = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case = scores_sum / seq_lengths[:, None]
snake_case ,snake_case = scores_sum_average.view(-1 ).topk(lowerCAmelCase , -1 )
snake_case = next_tokens // scores_sum.shape[1]
snake_case = seq_lengths[next_tokens_source]
snake_case = next_tokens % scores_sum.shape[1]
snake_case = next_tokens.unsqueeze(1 )
snake_case = tokens[next_tokens_source]
snake_case = torch.cat((tokens, next_tokens) , dim=1 )
snake_case = generated[next_tokens_source]
snake_case = scores_sum_average * seq_lengths
snake_case = is_stopped[next_tokens_source]
snake_case = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
snake_case = torch.cat((generated, next_token_embed) , dim=1 )
snake_case = is_stopped + next_tokens.eq(lowerCAmelCase ).squeeze()
if is_stopped.all():
break
snake_case = scores / seq_lengths
snake_case = scores.argsort(descending=lowerCAmelCase )
# tokens tensors are already padded to max_seq_length
snake_case = [tokens[i] for i in order]
snake_case = torch.stack(lowerCAmelCase , dim=0 )
snake_case = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 149
| 1
|
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
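# Worked examples (added for illustration; values follow directly from the
# conversion factors in METRIC_CONVERSION above):
#
# >>> convert_volume(4, "cubicmeter", "litre")
# 4000
# >>> convert_volume(0.5, "cubicmeter", "litre")
# 500.0
# >>> convert_volume(1, "kilolitre", "cubicmeter")
# 1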
| 73
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates the euclidean distance between two vectors.
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """
    :param dataset: set containing all the data.
    :param value_array: vectors for which we want to find the nearest neighbour.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """
    Calculates the cosine similarity between two vectors.
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
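# Worked example (added for illustration): nearest neighbour of [0, 1] in a
# three-point dataset using the euclidean search above.
#
# >>> import numpy as np
# >>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
# >>> value_array = np.array([[0, 1]])
# >>> similarity_search(dataset, value_array)
# [[[0, 0], 1.0]]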
| 138
| 0
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 369
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 341
| 0
|
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
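# Illustrative sanity check (an addition, not part of the original snippet); the
# expected ordering follows the left-to-right backtracking above:
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]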
| 89
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
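# Hedged invocation sketch (run id and token below are placeholders, not from
# this file):
#
#   python extract_warnings.py --workflow_run_id 12345 --output_dir ./warnings_out \
#       --token <github_token> --targets DeprecationWarning,UserWarning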
| 50
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
A_ : Optional[Any] = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(A_)
class RagConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
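# Hedged usage sketch (the sub-config classes below are illustrative choices,
# not mandated by this file): building a composite config with the classmethod
# defined above.
#
#   from transformers import DPRConfig, BartConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       DPRConfig(), BartConfig(), n_docs=5
#   )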
| 365
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
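# Hedged illustration (an assumption, not from this file): a concrete command
# implements both abstract hooks. Per the transformers CLI convention, `parser`
# is the argparse sub-parsers action handed in by the entry point.
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        env_parser = parser.add_parser("env")  # register the `env` subcommand
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info would go here")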
| 316
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
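# Hedged usage sketch (repo id and local path are assumptions): a file like this
# is typically loaded as a community pipeline via `custom_pipeline`.
#
#   pipeline = DiffusionPipeline.from_pretrained(
#       "google/ddpm-cifar10-32", custom_pipeline="./custom_pipeline"
#   )
#   images, message = pipeline(num_inference_steps=50)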
| 60
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
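# Note on the `jit=True` calls above: `replicate`/`shard` place one copy of the
# params and one prompt on each device, so the leading image dimension equals
# `jax.device_count()`, which is exactly what both shape assertions check.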
| 60
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
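# Hedged usage sketch (file names are assumptions): the builder above reads
# pickled pandas DataFrames, so preparing data for it looks like:
#
#   import pandas as pd
#   pd.DataFrame({"text": ["foo", "bar"]}).to_pickle("train.pkl")
#   ds = datasets.load_dataset("pandas", data_files={"train": "train.pkl"})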
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
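# With the lazy module in place, `import transformers.models.bigbird_pegasus` is
# cheap: the torch-backed classes listed above are only imported the first time
# an attribute such as `BigBirdPegasusModel` is actually accessed.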
| 89
| 0
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 149
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
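# Note: `batch_step_no_noise` is the parallel-sampling entry point exercised
# above. It applies one deterministic (eta=0) DDIM update to a whole batch of
# (sample, timestep) pairs at once, hence the flatten(0, 1) calls in the test.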
| 149
| 1
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """simple docstring"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list of predictions, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait until every replica has written its rank_*.json result file
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
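# Hedged invocation sketch (paths and model below are placeholders):
#
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir tmp_gen --bs 16 --fp16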
| 5
|
'''simple docstring'''
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encrypt a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decrypt a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
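# Illustrative round trip (an addition, not in the original):
#   decrypt(encrypt("SOS")) == "SOS"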
| 5
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
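# Hedged wiring sketch (an assumption about the accompanying fine-tuning script):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ]
#   )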
| 63
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image from the ADE20k fixtures
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 341
| 0
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """simple docstring"""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
_lowerCAmelCase, r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""", )
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Any ):
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase_ : List[str] = self.tokenizer.eos_token
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : str = {}
if min_length_for_response is not None:
lowerCAmelCase_ : Optional[Any] = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase_ : Dict = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase_ : List[str] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase_ : Optional[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(SCREAMING_SNAKE_CASE_ )
return preprocess_params, forward_params, postprocess_params
    def __call__( self : Dict , conversations , num_workers=0 , **kwargs ):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
return outputs
    def preprocess( self : Any , conversation , min_length_for_response=32 ):
        if not isinstance(conversation , Conversation ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
        if conversation.new_user_input is None:
            raise ValueError(
                F"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self : Dict , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        max_length = generate_kwargs.get('max_length' , self.model.config.max_length )
        n = model_inputs["""input_ids"""].shape[1]
        # Trim the prompt from the left so at least `minimum_tokens` remain available for generation.
        if max_length - minimum_tokens < n:
            logger.warning(F"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
            trim = max_length - minimum_tokens
            model_inputs["""input_ids"""] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["""attention_mask"""] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop('conversation' )
        generate_kwargs["""max_length"""] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self : Any , model_outputs , clean_up_tokenization_spaces=True ):
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self : Union[str, Any] , conversation ):
        # Concatenate every turn separated by EOS and truncate to the model max length from the left.
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 366
|
"""simple docstring"""
import re
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bool:
"""simple docstring"""
    pattern = re.compile(
        R'^(?:0|94|\+94|0{2}94)' R'7(0|1|2|4|5|6|7|8)' R'(-| |)' R'\d{7}$' )
    return bool(re.search(pattern , lowerCAmelCase__ ) )
if __name__ == "__main__":
    phone = """0094702343221"""
    print(is_sri_lankan_phone_number(phone))
| 289
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : int = 0
_lowercase : bool = False
_lowercase : float = 3.0
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )
@require_cuda
def _lowercase ( self ) -> str:
'''simple docstring'''
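        # The handler's values must reach the GradScaler that Accelerator builds internally.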
        scaler_handler = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def _lowercase ( self ) -> str:
'''simple docstring'''
        cmd = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = """"""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 95
|
"""simple docstring"""
def A ( num_a :int , num_b :int ) -> bool:
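    # Integers with different signs XOR to a negative value (the sign bit is set).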
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A ( unittest.TestCase ):
    def check_results_dict_not_empty (self , results ) -> List[str]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result )
def lowercase_ (self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "sgugger/tiny-distilbert-classification"
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ (self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , torchscript=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowercase_ (self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , fpaa=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ (self : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
# set architectures equal to `None`
UpperCAmelCase__ = None
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ (self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def lowercase_ (self : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ (self : Optional[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ (self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tinier_bart"
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ (self : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tinier_bart"
UpperCAmelCase__ = AutoConfig.from_pretrained(__UpperCAmelCase )
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(__UpperCAmelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(__UpperCAmelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv" ) , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv" ) ).exists() )
def lowercase_ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : List[str] ):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "current" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
UpperCAmelCase__ = PyTorchBenchmark(__UpperCAmelCase )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt" ) ).exists() )
| 350
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = 'facebook/bart-large-mnli'
__UpperCAmelCase : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
__UpperCAmelCase : Optional[int] = 'text_classifier'
__UpperCAmelCase : int = AutoTokenizer
__UpperCAmelCase : Dict = AutoModelForSequenceClassification
__UpperCAmelCase : int = ['text', ['text']]
__UpperCAmelCase : Optional[int] = ['text']
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
super().setup()
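        # Find the class index the model uses for "entailment" in its label map.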
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
        self._labels = labels
return self.pre_processor(
[text] * len(__UpperCAmelCase ) , [f"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 143
| 0
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar('''T''')
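# LRU cache backed by a deque (recency order) plus a set (O(1) membership checks).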
class LRUCache ( Generic[T] ):
"""simple docstring"""
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self : Dict , n : int ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self : str , x : T ):
        # Bring x to the front; evict the least recently used key when at capacity.
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self : int ):
        # Print cache contents from most to least recently used.
        for k in self.dq_store:
            print(k )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
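# Quine-McCluskey minimisation: repeatedly merge minterms that differ in a single bit,
# then pick essential prime implicants from the coverage chart.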
def compare_string ( stringa , stringb ) -> str | Literal[False]:
    list_a = list(stringa )
    list_b = list(stringb )
    count = 0
    for i in range(len(list_a ) ):
        if list_a[i] != list_b[i]:
            count += 1
            list_a[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list_a )
def check ( binary ) -> list[str]:
    pi = []
    while True:
        checka = ['$'] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    checka[i] = '*'
                    checka[j] = '*'
                    temp.append('X' )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary ( no_of_variable , minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table ( stringa , stringb , count ) -> bool:
    list_a = list(stringa )
    list_b = list(stringb )
    count_n = 0
    for i in range(len(list_a ) ):
        if list_a[i] != list_b[i]:
            count_n += 1
    return count_n == count
def selection ( chart , prime_implicants ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart ( prime_implicants , binary ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count('_' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main ( ) -> None:
    no_of_variable = int(input('Enter the no. of variables\n' ) )
    minterms = [
        float(x )
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print('Prime Implicants are:' )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print('Essential Prime Implicants are:' )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'openai-gpt'
__SCREAMING_SNAKE_CASE = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
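    # Defaults below match the original GPT-1 release: 40478 BPE vocab, 512 positions, 12 layers of width 768.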
    def __init__( self , vocab_size=4_0478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> int:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 369
|
# Function to print upper half of diamond (pyramid)
def floyd ( n: int ):
    '''simple docstring'''
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd ( n: int ):
    '''simple docstring'''
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )
def pretty_print ( n: int ):
    '''simple docstring'''
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 152
| 0
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCAmelCase__ = getLogger(__name__)
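# One process per GPU decodes a shard of the dataset and writes rank_<i>_output.json;
# rank 0 then gathers every shard, merges the predictions and computes BLEU/ROUGE.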
def eval_data_dir ( data_dir , save_dir , model_name , bs = 8 , max_source_length = 1024 , type_path="val" , n_obs=None , fpaa=False , task="summarization" , local_rank=None , num_return_sequences=1 , dataset_kwargs = None , prefix="" , **generate_kwargs , ) -> Dict:
    """simple docstring"""
    model_name = str(model_name )
    assert local_rank is not None
    torch.distributed.init_process_group(backend='''nccl''' , rank=local_rank )
    save_dir = Path(save_dir )
    save_path = save_dir.joinpath(F"rank_{local_rank}_output.json" )
    torch.cuda.set_device(local_rank )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).cuda()
    if fpaa:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model , task ) # update config with task specific params
    num_beams = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    ds = SeqaSeqDataset(
        data_dir , tokenizer , max_source_length , max_target_length=1024 , type_path=type_path , n_obs=n_obs , prefix=prefix , **dataset_kwargs , )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs , distributed=True , add_extra_examples=False , shuffle=True )
    data_loader = DataLoader(ds , sampler=sampler , batch_size=bs , collate_fn=ds.collate_fn )
    results = []
    for batch in tqdm(data_loader ):
        summaries = model.generate(
            input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=num_return_sequences , num_beams=num_beams , **generate_kwargs , )
        preds = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        ids = batch['''ids''']
        if num_return_sequences > 1:
            preds = chunks(preds , num_return_sequences ) # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds ):
            results.append({'''pred''': pred, '''id''': ids[i].item()} )
    save_json(results , save_path )
    return results, sampler.num_replicas
def run_generate ( ) -> Dict:
    """simple docstring"""
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
    parser.add_argument('''--data_dir''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument(
        '''--model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
    parser.add_argument('''--save_dir''' , type=str , help='''where to save''' , default='''tmp_gen''' )
    parser.add_argument('''--max_source_length''' , type=int , default=None )
    parser.add_argument(
        '''--type_path''' , type=str , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--local_rank''' , type=int , default=-1 , required=False , help='''should be passed by distributed.launch''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=None , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument(
        '''--num_return_sequences''' , type=int , default=1 , required=False , help='''How many sequences to return''' )
    parser.add_argument(
        '''--sync_timeout''' , type=int , default=600 , required=False , help='''How long should master process wait for other processes to finish.''' , )
    parser.add_argument('''--src_lang''' , type=str , default=None , required=False )
    parser.add_argument('''--tgt_lang''' , type=str , default=None , required=False )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
    start_time = time.time()
    args , rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
if generate_kwargs and args.local_rank <= 0:
print(F"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + '''_tmp''' )
    Path(json_save_dir ).mkdir(exist_ok=True ) # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(F"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["""src_lang"""] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["""tgt_lang"""] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results , num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''' )
            print(F"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds , labels )
        metrics["""n_obs"""] = len(preds )
        runtime = time.time() - start_time
        metrics["""seconds_per_sample"""] = round(runtime / metrics['''n_obs'''] , 4 )
        metrics["""n_gpus"""] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F"{args.type_path}.target" ) )
    else:
        shutil.rmtree(json_save_dir )
def combine_partial_results ( partial_results ) -> List:
    """simple docstring"""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x['''pred'''] for x in records]
return preds
def gather_results_from_each_node ( num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    """simple docstring"""
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json''' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 5
|
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    def _info (self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare (self , dl_manager ) -> Dict:
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute (self , sources , predictions , references , gpus=None , progress_bar=False ) -> int:
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        mean_score , scores = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5
| 1
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def get_dataloaders ( accelerator ,batch_size = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" ,"""mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function ,batched=True ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" ,"""labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples ,padding="""longest""" ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors="""pt""" ,)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function ( config ,args ):
    """simple docstring"""
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" ,"""mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
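    # On a CUDA out-of-memory error, find_executable_batch_size halves the batch size
    # and re-invokes the wrapped function until training fits in memory.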
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() ,lr=lr )
        train_dataloader , eval_dataloader = get_dataloaders(accelerator ,batch_size )
# Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=1_00 ,num_training_steps=(len(train_dataloader ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
# Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions ,references=references ,)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''' ,eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main ( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" ,type=str ,default=None ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" ,)
    parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config ,args )
if __name__ == "__main__":
main()
| 30
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config, '''neck_hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config, '''num_attention_heads''' ) )
class MobileViTModelTester :
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.0_2, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ):
        """simple docstring"""
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
        result = model(pixel_values, labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase : List[str] =(
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase : int =False
lowercase : str =False
lowercase : Optional[Any] =False
lowercase : Union[str, Any] =False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(lowerCAmelCase )
lowerCamelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ =[*signature.parameters.keys()]
lowerCamelCase_ =['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )
lowerCamelCase_ =outputs.hidden_states
lowerCamelCase_ =5
self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCamelCase_ =2
for i in range(len(lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =True
check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ =True
check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =MobileViTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def a_ ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(lowerCAmelCase )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase )
# verify the logits
lowerCamelCase_ =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =model.to(lowerCAmelCase )
lowerCamelCase_ =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase )
lowerCamelCase_ =outputs.logits
# verify the logits
lowerCamelCase_ =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
], device=lowerCAmelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCAmelCase, atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =model.to(lowerCAmelCase )
lowerCamelCase_ =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**lowerCAmelCase )
lowerCamelCase_ =outputs.logits.detach().cpu()
lowerCamelCase_ =image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase, target_sizes=[(50, 60)] )
lowerCamelCase_ =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, lowerCAmelCase )
lowerCamelCase_ =image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase )
lowerCamelCase_ =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, lowerCAmelCase )
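# A minimal reference sketch of what the two `post_process_semantic_segmentation`
# calls above exercise: with `target_sizes` each logit map is resized per image
# before the per-pixel argmax, without it the map keeps the raw logit resolution.
# This is an illustrative reimplementation for clarity, not the image processor's
# actual code.
import torch.nn.functional as F

def naive_post_process_semantic_segmentation(logits, target_sizes=None):
    # logits: (batch, num_classes, height, width)
    if target_sizes is None:
        return [logits[i].argmax(dim=0) for i in range(logits.shape[0])]
    return [
        F.interpolate(logits[i : i + 1], size=size, mode="bilinear", align_corners=False)[0].argmax(dim=0)
        for i, size in enumerate(target_sizes)
    ]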
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task ,reset_position_index_per_cell ,tf_checkpoint_path ,tapas_config_file ,pytorch_dump_path ):
"""simple docstring"""
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_46_94
_UpperCAmelCase = 0.20_79_51
_UpperCAmelCase = 0.12_11_94
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.0_35_25_13
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.45_19
_UpperCAmelCase = 0.90_34_21
_UpperCAmelCase = 2_22.0_88
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_31_41
_UpperCAmelCase = TapasForQuestionAnswering(config=lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=lowercase )
else:
raise ValueError(f'''Task {task} not supported.''' )
print(f'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase ,lowercase ,lowercase )
# Save pytorch-model (weights and configuration)
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase )
# Save tokenizer files
print(f'''Save tokenizer files to {pytorch_dump_path}''' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=5_12 )
tokenizer.save_pretrained(lowercase )
print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def UpperCAmelCase ( split_dict ) -> int:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def UpperCAmelCase ( split_info ) -> str:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
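# A minimal consumption sketch for the ONNX config above (names hypothetical:
# the obfuscation collapses both classes to `UpperCamelCase` and both properties
# to `a_`; upstream these are ResNetConfig/ResNetOnnxConfig exposing `inputs`
# and `atol_for_validation`):
# onnx_config = ResNetOnnxConfig(ResNetConfig())
# assert list(onnx_config.inputs) == ["pixel_values"]   # dynamic axes handed to the exporter
# assert onnx_config.atol_for_validation == 1e-3        # tolerance when validating the exported graph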
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , "tf_padding" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , "depth_multiplier" ) )
class snake_case__ :
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=13 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : str=0.25 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : Dict=8 , __lowerCamelCase : str=6 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[Any]="relu6" , __lowerCamelCase : int=12_80 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=10 , __lowerCamelCase : List[Any]=None , ) -> str:
a = parent
a = batch_size
a = num_channels
a = image_size
a = depth_multiplier
a = depth_divisible_by
a = min_depth
a = expand_ratio
a = tf_padding
a = output_stride
a = first_layer_is_expansion
a = finegrained_output
a = hidden_act
a = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
a = classifier_dropout_prob
a = use_labels
a = is_training
a = num_labels
a = initializer_range
a = scope
def __UpperCAmelCase ( self : str ) -> Tuple:
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.num_labels )
a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> int:
a = MobileNetVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> int:
a = self.num_labels
a = MobileNetVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : str ) -> int:
a = self.num_labels
a = MobileNetVaForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
a = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
a = self.prepare_config_and_inputs()
a , a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
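# Worked example of the shape checks above with the tester defaults
# (batch_size=13, image_size=32, output_stride=8): the backbone downsamples by
# the output stride, 32 // 8 == 4, so the last hidden state is
# (13, last_hidden_size, 4, 4) and the segmentation logits are
# (13, num_labels, 4, 4).
assert 32 // 8 == 4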
@require_torch
class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = MobileNetVaModelTester(self )
a = MobileNetVaConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def __UpperCAmelCase ( self : str ) -> Dict:
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Tuple ) -> int:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__lowerCamelCase )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] ):
a = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
a = outputs.hidden_states
a = 16
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = MobileNetVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __magic_name__ ( ):
'''simple docstring'''
a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : List[Any] ) -> int:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
a = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(__lowerCamelCase )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
# verify the logits
a = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
a = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
a = model.to(__lowerCamelCase )
a = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
a = prepare_img()
a = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
a = model(**__lowerCamelCase )
a = outputs.logits
# verify the logits
a = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __lowerCamelCase )
a = torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=__lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = KandinskyVaaControlnetPipeline
__lowerCamelCase = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase = False
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
return 32
@property
def __a ( self ) -> int:
'''simple docstring'''
return 32
@property
def __a ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def __a ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self ) -> List[Any]:
'''simple docstring'''
return 100
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Tuple = UNetaDConditionModel(**__UpperCamelCase )
return model
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __a ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : int = self.dummy_unet
snake_case__ : Tuple = self.dummy_movq
snake_case__ : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=__UpperCamelCase , )
snake_case__ : str = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> int:
'''simple docstring'''
snake_case__ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case__ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCamelCase )
# create hint
snake_case__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : Any = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : str = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case__ : int = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : List[Any] = 'cpu'
snake_case__ : Any = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Dict = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
snake_case__ : Dict = output.images
snake_case__ : Any = pipe(
**self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : str = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
snake_case__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
        hint = torch.from_numpy(np.array(__UpperCamelCase ) ).float() / 2_5_5.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCamelCase )
snake_case__ : int = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
snake_case__ : List[Any] = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[int] = 'A robot, 4k photo'
snake_case__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ , snake_case__ : Tuple = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
snake_case__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
snake_case__ : Dict = pipeline(
image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , hint=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=100 , output_type='np' , )
snake_case__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
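# Standalone sketch of the depth-hint preprocessing used in the test above:
# scale the uint8 image to [0, 1], move channels first, add a batch dimension.
import numpy as np
import torch

def prepare_hint(pil_image):
    hint = torch.from_numpy(np.array(pil_image)).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)  # (1, 3, height, width)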
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class lowercase_ (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = 'blip_2_vision_model'
def __init__( self : List[str] ,lowercase__ : int=1_4_0_8 ,lowercase__ : str=6_1_4_4 ,lowercase__ : str=3_9 ,lowercase__ : List[str]=1_6 ,lowercase__ : List[str]=2_2_4 ,lowercase__ : List[Any]=1_4 ,lowercase__ : Dict="gelu" ,lowercase__ : int=0.0_0_0_0_1 ,lowercase__ : Optional[int]=0.0 ,lowercase__ : Optional[int]=1e-1_0 ,lowercase__ : Dict=True ,**lowercase__ : Optional[Any] ,):
super().__init__(**lowercase__ )
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = patch_size
__lowercase = image_size
__lowercase = initializer_range
__lowercase = attention_dropout
__lowercase = layer_norm_eps
__lowercase = hidden_act
__lowercase = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple ,lowercase__ : Union[str, os.PathLike] ,**lowercase__ : Union[str, Any] ):
cls._set_token_in_kwargs(lowercase__ )
        config_dict , kwargs = cls.get_config_dict(lowercase__ ,**lowercase__ )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''' ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict ,**kwargs )
class lowercase_ (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 'blip_2_qformer'
def __init__( self : Optional[Any] ,lowercase__ : Optional[Any]=3_0_5_2_2 ,lowercase__ : List[Any]=7_6_8 ,lowercase__ : Optional[int]=1_2 ,lowercase__ : Tuple=1_2 ,lowercase__ : Union[str, Any]=3_0_7_2 ,lowercase__ : List[str]="gelu" ,lowercase__ : Tuple=0.1 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Any=5_1_2 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1e-1_2 ,lowercase__ : Tuple=0 ,lowercase__ : Dict="absolute" ,lowercase__ : List[Any]=2 ,lowercase__ : List[Any]=1_4_0_8 ,**lowercase__ : str ,):
super().__init__(pad_token_id=lowercase__ ,**lowercase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = cross_attention_frequency
__lowercase = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict ,lowercase__ : Union[str, os.PathLike] ,**lowercase__ : Any ):
cls._set_token_in_kwargs(lowercase__ )
        config_dict , kwargs = cls.get_config_dict(lowercase__ ,**lowercase__ )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''' ) == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls ,'''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict ,**kwargs )
class lowercase_ (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 'blip-2'
SCREAMING_SNAKE_CASE : Dict = True
def __init__( self : Union[str, Any] ,lowercase__ : Any=None ,lowercase__ : List[str]=None ,lowercase__ : List[Any]=None ,lowercase__ : Dict=3_2 ,**lowercase__ : List[str] ):
super().__init__(**lowercase__ )
if vision_config is None:
__lowercase = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
__lowercase = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
__lowercase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
__lowercase = BlipaVisionConfig(**lowercase__ )
__lowercase = BlipaQFormerConfig(**lowercase__ )
__lowercase = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
__lowercase = CONFIG_MAPPING[text_model_type](**lowercase__ )
__lowercase = self.text_config.tie_word_embeddings
__lowercase = self.text_config.is_encoder_decoder
__lowercase = num_query_tokens
__lowercase = self.vision_config.hidden_size
__lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__lowercase = 1.0
__lowercase = 0.0_2
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int ,lowercase__ : BlipaVisionConfig ,lowercase__ : BlipaQFormerConfig ,lowercase__ : PretrainedConfig ,**lowercase__ : Any ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.vision_config.to_dict()
__lowercase = self.qformer_config.to_dict()
__lowercase = self.text_config.to_dict()
__lowercase = self.__class__.model_type
return output
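# Composition sketch for the composite config above (class/method names assumed
# from the upstream library, since the obfuscation reuses `lowercase_` for all
# three classes; upstream they are Blip2VisionConfig, Blip2QFormerConfig and
# Blip2Config with `from_vision_qformer_text_configs`):
# from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
# config = Blip2Config.from_vision_qformer_text_configs(
#     vision_config=Blip2VisionConfig(),
#     qformer_config=Blip2QFormerConfig(),
#     text_config=OPTConfig(),
# )
# assert config.num_query_tokens == 32  # the default set in __init__ above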
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Tuple=0 ):
__lowercase = np.random.RandomState(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowercase = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowercase = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowercase = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowercase = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowercase = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**lowercase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowercase = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs['''prompt''']]
# forward
__lowercase = pipe(**lowercase__ )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop('''prompt''' )]
__lowercase = pipe.tokenizer(
lowercase__ ,padding='''max_length''' ,max_length=pipe.tokenizer.model_max_length ,truncation=lowercase__ ,return_tensors='''np''' ,)
__lowercase = text_inputs['''input_ids''']
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**lowercase__ )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ['''this is a negative prompt''']
__lowercase = negative_prompt
__lowercase = 3 * [inputs['''prompt''']]
# forward
__lowercase = pipe(**lowercase__ )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop('''prompt''' )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
lowercase__ ,padding='''max_length''' ,max_length=pipe.tokenizer.model_max_length ,truncation=lowercase__ ,return_tensors='''np''' ,)
__lowercase = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**lowercase__ )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def SCREAMING_SNAKE_CASE ( self : str ):
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''onnx''' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
__lowercase = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=1_0 ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,subfolder='''scheduler''' ,revision='''onnx''' )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,scheduler=lowercase__ ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''open neural network exchange'''
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=1_0 ,generator=lowercase__ ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,subfolder='''scheduler''' ,revision='''onnx''' )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,scheduler=lowercase__ ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''open neural network exchange'''
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=1_0 ,generator=lowercase__ ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = 0
        def test_callback_fn(step : int ,timestep : int ,latents : np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
        test_callback_fn.has_been_called = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''Andromeda galaxy in a bottle'''
__lowercase = np.random.RandomState(0 )
pipe(
prompt=lowercase__ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=lowercase__ ,callback=lowercase__ ,callback_steps=1 ,)
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
assert isinstance(lowercase__ ,lowercase__ )
assert pipe.safety_checker is None
__lowercase = pipe('''example prompt''' ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__ )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe('''example prompt''' ,num_inference_steps=2 ).images[0]
assert image is not None
'''simple docstring'''
def solution( power : int = 1_0_0_0 ):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 1_0, n // 1_0
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
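# Worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so the digit-sum
# loop above returns 26 for power=15.
assert solution(15) == 26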
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys ( config , vqa_model=False , nlvr_model=False , irtr_model=False ) -> List[Any]:
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v ( state_dict , config ) -> Tuple:
for i in range(config.num_hidden_layers ):
snake_case_ = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' )
snake_case_ = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
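# The fused qkv matrix popped above has shape (3 * hidden_size, hidden_size) and
# the three slices are, in order, the query, key and value projections. The
# obfuscation dropped the left-hand sides of the writes; a hedged sketch of the
# intended assignments (target key names assumed to follow the usual ViLT/ViT
# layout in transformers):
# state_dict[f"vilt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
# state_dict[f"vilt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
# state_dict[f"vilt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
# (the three bias slices are written to the matching ".bias" keys)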
def UpperCAmelCase ( state_dict ) -> str:
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ) -> Tuple:
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint ( checkpoint_url , pytorch_dump_folder_path ) -> Optional[Any]:
snake_case_ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowerCAmelCase__ )
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
if "vqa" in checkpoint_url:
snake_case_ = True
snake_case_ = 3129
snake_case_ = """huggingface/label-files"""
snake_case_ = """vqa2-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = ViltForQuestionAnswering(lowerCAmelCase__ )
elif "nlvr" in checkpoint_url:
snake_case_ = True
snake_case_ = 2
snake_case_ = {0: """False""", 1: """True"""}
snake_case_ = {v: k for k, v in config.idalabel.items()}
snake_case_ = 3
snake_case_ = ViltForImagesAndTextClassification(lowerCAmelCase__ )
elif "irtr" in checkpoint_url:
snake_case_ = True
snake_case_ = ViltForImageAndTextRetrieval(lowerCAmelCase__ )
elif "mlm_itm" in checkpoint_url:
snake_case_ = True
snake_case_ = ViltForMaskedLM(lowerCAmelCase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
snake_case_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='cpu' )["""state_dict"""]
snake_case_ = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
if mlm_model or irtr_model:
snake_case_ = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(lowerCAmelCase__ )
# Define processor
snake_case_ = ViltImageProcessor(size=384 )
snake_case_ = BertTokenizer.from_pretrained('bert-base-uncased' )
snake_case_ = ViltProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
snake_case_ = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowerCAmelCase__ ).raw )
snake_case_ = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowerCAmelCase__ ).raw )
snake_case_ = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
snake_case_ = processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors='pt' )
snake_case_ = processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors='pt' )
snake_case_ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
snake_case_ = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowerCAmelCase__ ).raw )
if mlm_model:
snake_case_ = """a bunch of [MASK] laying on a [MASK]."""
else:
snake_case_ = """How many cats are there?"""
snake_case_ = processor(lowerCAmelCase__ , lowerCAmelCase__ , return_tensors='pt' )
snake_case_ = model(**lowerCAmelCase__ )
# Verify outputs
if mlm_model:
snake_case_ = torch.Size([1, 11, 30522] )
snake_case_ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase__ , atol=1e-4 )
# verify masked token prediction equals "cats"
snake_case_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
snake_case_ = torch.Size([1, 3129] )
snake_case_ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase__ , atol=1e-4 )
# verify vqa prediction equals "2"
snake_case_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
snake_case_ = torch.Size([1, 2] )
snake_case_ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
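
# Example invocation (editorial addition, illustrative only; the script file
# name below is hypothetical, and the default --checkpoint_url shown above is
# used when the flag is omitted):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-mlm-itm-converted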
| 366
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text``, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
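

# Worked example (editorial addition, illustration only): sliding the window
# one character to the right drops the leading character's contribution and
# appends the trailing one -- exactly the update performed in the loop above.
def _rolling_hash_demo() -> None:
    text = "abcd"
    p_len = 3
    old_hash = 0
    for ch in text[:p_len]:  # hash of "abc"
        old_hash = (ord(ch) + old_hash * alphabet_size) % modulus
    new_hash = 0
    for ch in text[1 : 1 + p_len]:  # hash of "bcd"
        new_hash = (ord(ch) + new_hash * alphabet_size) % modulus
    power = pow(alphabet_size, p_len - 1, modulus)
    rolled = ((old_hash - ord(text[0]) * power) * alphabet_size + ord(text[3])) % modulus
    assert rolled == new_hash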
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 312
| 0
|
def prefix_function(input_string: str) -> list:
    """
    Calculate the prefix function (KMP failure function) of ``input_string``:
    for every prefix, the length of the longest proper prefix that is also a suffix.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    Return the largest value of the prefix function, i.e. the length of the
    longest prefix that reoccurs as a suffix of some prefix.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
|
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they violate
    ``direction`` (1 for ascending, 0 for descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of ``length`` elements starting at ``low``."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``length`` elements of ``array`` starting at ``low``; ``length`` must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
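
# Editorial note: bitonic sort is only correct when the input length is a power
# of two, because every stage splits the range exactly in half. A guard one
# could add before calling bitonic_sort (illustrative sketch):
#
#   if len(unsorted) & (len(unsorted) - 1) != 0:
#       raise ValueError("bitonic sort requires a power-of-two input length")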
| 30
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""Pipeline for class-conditional image generation with a Transformer backbone (DiT)."""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.'
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
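

# Usage sketch (editorial addition, not part of the pipeline source). It assumes
# this class is exported as `DiTPipeline` and that the public
# "facebook/DiT-XL-2-256" checkpoint is available:
#
#   import torch
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images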
| 77
|
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Solve an ODE dy/dx = ode_func(x, y) with initial value y(x0) = y0 using the
    explicit (forward) Euler method.

    >>> # the exact solution of y' = y, y(0) = 1 is exp(x)
    >>> y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)
    >>> round(y[-1], 2)
    144.77
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 312
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 312
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }
    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs
        )
| 365
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split('.') if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_v4_address_valid(ip) else 'invalid'
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 167
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
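

# Usage sketch (editorial addition): this pipeline is normally reached through
# `transformers.pipeline`; the checkpoint name below is one common choice, not
# a requirement of the class:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["a cat", "a dog"])
#   # -> [{"score": ..., "label": "a cat"}, {"score": ..., "label": "a dog"}]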
| 87
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 52
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `LxmertModel`."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs)
| 362
|
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
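
# Editorial note: the first perfect numbers are 6 (= 1 + 2 + 3), 28, 496 and
# 8128, so for example perfect(6) and perfect(28) return True while perfect(12)
# returns False (1 + 2 + 3 + 4 + 6 = 16 != 12).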
| 257
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDDEN_SIZE_MAPPING = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('''emb.'''):
            name = name.replace('''emb.''', '''embeddings.''')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0'''):
            name = name.replace('''blocks.0.ln0''', '''blocks.0.pre_ln''')
        # att -> attention
        name = re.sub(R'''blocks\.(\d+)\.att''', R'''blocks.\1.attention''', name)
        # ffn -> feed_forward
        name = re.sub(R'''blocks\.(\d+)\.ffn''', R'''blocks.\1.feed_forward''', name)
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k'''):
            name = name.replace('''.time_mix_k''', '''.time_mix_key''')
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v'''):
            name = name.replace('''.time_mix_v''', '''.time_mix_value''')
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r'''):
            name = name.replace('''.time_mix_r''', '''.time_mix_receptance''')

        if name != "head.weight":
            name = '''rwkv.''' + name

        state_dict[name] = weight
    return state_dict
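

# Worked example (editorial addition): the rename chain above maps original
# RWKV keys to their Hugging Face names, e.g.
#   "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "head.weight"             -> "head.weight"  (kept as-is, no "rwkv." prefix)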
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''')
    if size not in possible_sizes:
        raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''')

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='''cpu''')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, '''w''', encoding='''utf-8''') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''')
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='''2GB''')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 8
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''')
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''')
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """
    Factory function to get a Formatter given its type name and keyword arguments.
    Raises the registered error when the format type is known but its backend is unavailable.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got \'{format_type}\'''')
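

# Usage sketch (editorial addition): aliases resolve to their main type before
# lookup, and unavailable backends re-raise the error registered above, e.g.
#
#   get_formatter("np")     # same as get_formatter("numpy")
#   get_formatter("torch")  # raises the registered ValueError if PyTorch is absent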
| 312
| 0
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of ``number``, rounded to ``digit_amount`` digits
    (keep all of them when ``digit_amount`` is 0).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 368
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 346
| 0
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum value in nums[left:right + 1] using divide and conquer.

    >>> find_max([3, 2, 1], 0, 2)
    3
    >>> find_max([-3, -2, -1], 0, 2)
    -1
    """
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 77
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 77
| 1
|
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of a contiguous subarray of ``arr`` (Kadane's algorithm)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
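
# Edge-case illustration (editorial addition): with an all-negative input the
# `allow_empty_subarrays` flag changes the answer:
#   max_subarray_sum([-3, -1, -2])                              -> -1
#   max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True)  ->  0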
| 355
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
@property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 102
| 0
|
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.array) -> np.array:
    """Convert an RGB image to grayscale using the ITU-R BT.601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.array) -> np.array:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.array, kernel: np.array) -> np.array:
    """Morphological dilation of a binary ``image`` by a structuring ``kernel``."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
| 226
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1E-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 167
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__lowerCAmelCase = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 10_00,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__lowerCAmelCase = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 10_00,
"block_out_channels": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__lowerCAmelCase = {
"sample_size": 2_56,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__lowerCAmelCase = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
__lowerCAmelCase = {
"num_train_timesteps": 2_01,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
__lowerCAmelCase = {
"num_train_timesteps": 1_51,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected')
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3, dim=0)

    new_checkpoint[f'{new_prefix}.group_norm.weight'] = checkpoint[f'{old_prefix}.norm.weight']
    new_checkpoint[f'{new_prefix}.group_norm.bias'] = checkpoint[f'{old_prefix}.norm.bias']

    new_checkpoint[f'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f'{new_prefix}.to_out.0.weight'] = (
        checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f'{new_prefix}.to_out.0.bias'] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
    return new_checkpoint
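

# Editorial note: the original checkpoints store the attention projections as
# 1x1 convolutions with weights of shape (out, in, 1, 1); the repeated
# `.squeeze(-1)` above drops both trailing singleton dimensions, turning them
# into the (out, in) linear weights diffusers expects, e.g. (64, 64, 1, 1) -> (64, 64).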
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = strabool(args.class_cond)
__lowerCAmelCase = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__lowerCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowerCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__lowerCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__lowerCAmelCase = None
__lowerCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
__lowerCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__lowerCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__lowerCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowerCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__lowerCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
__lowerCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
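# Example invocation (illustrative, not part of the original script; assumes it is
# saved as convert_consistency_to_diffusers.py, and relies on the checkpoint
# filename containing the "cd"/"imagenet64" substrings the branches above match):
#
#     python convert_consistency_to_diffusers.py \
#         --unet_path cd_imagenet64_l2.pt \
#         --dump_path ./consistency-model-imagenet64 \
#         --class_cond True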
| 351
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 288
| 0
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''CLIPConfigMixin''',
'''DecisionTransformerConfigMixin''',
'''EncoderDecoderConfigMixin''',
'''RagConfigMixin''',
'''SpeechEncoderDecoderConfigMixin''',
'''VisionEncoderDecoderConfigMixin''',
'''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints() -> None:
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
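# Illustration of what `_re_checkpoint` extracts (not part of the original script):
#
#     >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]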
| 91
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] =input('''Enter image url: ''').strip()
print(F'''Downloading image from {url} ...''')
lowerCAmelCase__ : int =BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase__ : Union[str, Any] =soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCAmelCase__ : int =requests.get(image_url).content
lowerCAmelCase__ : Optional[int] =F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 257
| 0
|
def solution(n: int = 1000) -> int:
    # Sum of all multiples of 3 or 5 below n; multiples of 15 are already
    # counted exactly once by the first condition, so no correction is needed.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 117
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
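# For reference (illustrative, not part of the original test): the behavior under
# test is that a KwargsHandler dataclass reports only the fields that differ from
# their defaults, e.g. with MockClass as defined above:
#
#     handler = MockClass(a=2, c=2.25)
#     assert handler.to_kwargs() == {"a": 2, "c": 2.25}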
| 117
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def floor_to_multiple(n, d):
    # NOTE: the original helper's name was not recoverable; this rounds n down
    # to the nearest multiple of d.
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
@torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 62
|
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return log10(n / df), or the smoothed variant 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Return the tf-idf score, rounded to 3 decimal places."""
    return round(tf * idf, 3)
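# Worked example (illustrative, not part of the original file): a term appearing
# 3 times in a document, found in 10 of 1000 corpus documents:
#
#     >>> idf = inverse_document_frequency(10, 1000)  # log10(1000 / 10) = 2.0
#     >>> tf_idf(3, idf)
#     6.0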
| 346
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __magic_name__ ( __a : int , __a : str , __a : Dict , __a : str , __a : int ):
'''simple docstring'''
with open(__a ) as metadata_file:
UpperCamelCase__ = json.load(__a )
UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__a , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
UpperCamelCase__ = torch.load(__a , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
UpperCamelCase__ = load_original_entity_vocab(__a )
# add an entry for [MASK2]
UpperCamelCase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase__ = AddedToken("""<ent>""" , lstrip=__a , rstrip=__a )
UpperCamelCase__ = AddedToken("""<ent2>""" , lstrip=__a , rstrip=__a )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a , """tokenizer_config.json""" ) , """r""" ) as f:
UpperCamelCase__ = json.load(__a )
UpperCamelCase__ = """MLukeTokenizer"""
with open(os.path.join(__a , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__a , __a )
with open(os.path.join(__a , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__a , __a )
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
UpperCamelCase__ = state_dict["""embeddings.word_embeddings.weight"""]
UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase__ = state_dict[bias_name]
UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ = f"encoder.layer.{layer_index}.attention.self."
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
UpperCamelCase__ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase__ = state_dict["""entity_predictions.bias"""]
UpperCamelCase__ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase__ = LukeForMaskedLM(config=__a ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
UpperCamelCase__ = state_dict[key]
else:
UpperCamelCase__ = state_dict[key]
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__a , strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__a , task="""entity_classification""" )
UpperCamelCase__ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
UpperCamelCase__ = (0, 9)
UpperCamelCase__ = tokenizer(__a , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase__ = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 33, 768) )
UpperCamelCase__ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 1, 768) )
UpperCamelCase__ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __a , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__a )
UpperCamelCase__ = """Tokyo is the capital of <mask>."""
UpperCamelCase__ = (24, 30)
UpperCamelCase__ = tokenizer(__a , entity_spans=[span] , return_tensors="""pt""" )
UpperCamelCase__ = model(**__a )
UpperCamelCase__ = encoding["""input_ids"""][0].tolist()
UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__a ) )
model.save_pretrained(__a )
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
UpperCamelCase__ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
UpperCamelCase__ = [json.loads(__a ) for line in open(__a )]
UpperCamelCase__ = {}
for entry in data:
UpperCamelCase__ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase__ = entity_id
break
UpperCamelCase__ = f"{language}:{entity_name}"
UpperCamelCase__ = entity_id
return new_mapping
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
lowerCamelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 178
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # Name chosen descriptively; the original function name was not recoverable.
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
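# Example (illustrative, not part of the original file): the cheapest
# right/down path through this grid is 1 -> 3 -> 1 -> 1 -> 1, with cost 7:
#
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7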
| 178
| 1
|
"""simple docstring"""
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on a submerged object: fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
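# Worked example (illustrative, not part of the original file): a fully
# submerged 0.002 m^3 object in water (1000 kg/m^3) under standard gravity
# experiences a buoyant force of 1000 * 9.80665 * 0.002 ≈ 19.61 N:
#
#     >>> round(archimedes_principle(fluid_density=1000, volume=0.002), 2)
#     19.61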
| 69
|
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Return the side length of the first square spiral whose diagonal prime ratio falls below `ratio`."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
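# Usage sketch (illustrative, not part of the original file): with the default
# ratio 0.1 this solves Project Euler 58; a looser ratio terminates quickly,
# e.g. the diagonal prime ratio first drops below 50% at side length 11:
#
#     >>> solution(0.5)
#     11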
| 102
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
SCREAMING_SNAKE_CASE :Tuple = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class __magic_name__ ( snake_case ):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase_ = self._tf_checkpoint
UpperCamelCase_ = ""
else:
UpperCamelCase_ = self._tf_checkpoint
UpperCamelCase_ = ""
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 351
|
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed
        # across python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
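
# Usage sketch (not part of the test suite): the zero-shot pipeline accepts image paths or URLs
# and downloads the openai/clip-vit-base-patch32 checkpoint on first use. The expected ordering
# mirrors the slow tests above ("remote" and "cat" score highest for this COCO image).
if __name__ == "__main__":
    demo_classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    demo_predictions = demo_classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "plane", "remote"],
    )
    print(demo_predictions)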
|
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """
    Gaussian Error Linear Unit. Original implementation of the gelu activation function in the
    Google BERT repo when initially created.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """
    Gaussian Error Linear Unit. This is a smoother version of the GELU (tanh approximation).
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GELU outputs to [-10, 10]; useful for quantization purposes.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: splits the input in two along `axis` and gates the first half with the
    sigmoid of the second half.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
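
# Minimal sketch of resolving and applying one of the activations registered above
# ("gelu_fast" is one of the ACT2FN keys); run this module directly to see the output.
if __name__ == "__main__":
    demo_act = get_tf_activation("gelu_fast")
    demo_x = tf.constant([-1.0, 0.0, 1.0])
    print(demo_act(demo_x).numpy())  # elementwise fast-GELU approximation of the input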
|
"""simple docstring"""
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # The student parameter names below follow DistilBERT's module layout
    # (q_lin/k_lin/v_lin/out_lin, ffn.lin1/lin2, sa_layer_norm/output_layer_norm).
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Copy every other teacher layer into consecutive student layers
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
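
    # Sanity-check sketch: reload the dumped checkpoint and report its size. The path is the
    # script's own --dump_checkpoint value; the count should be well below the teacher's.
    reloaded_sd = torch.load(args.dump_checkpoint, map_location="cpu")
    print(f"Reloaded {len(reloaded_sd)} tensors, {sum(t.numel() for t in reloaded_sd.values()):,} parameters.")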
|
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
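
# Standalone sketch of the prefix mechanism exercised by the slow tests above
# (downloads the Tanrei/GPTSAN-japanese tokenizer from the Hub):
if __name__ == "__main__":
    demo_tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    demo_encoded = demo_tokenizer("いワ", prefix_text="あン")
    print(demo_encoded.input_ids)       # prefix and text joined around a SEG token
    print(demo_encoded.token_type_ids)  # 1 marks prefix positions, 0 marks text positions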
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
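
# Lazy-import sketch: with the _LazyModule registration above, importing a light symbol does not
# pull in torch until a modeling class is actually accessed, e.g.:
#
#   from transformers import PLBartConfig   # cheap: only configuration_plbart is imported
#   config = PLBartConfig()                 # accessing PLBartModel would additionally import torch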
|
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
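
# Hedged usage sketch (API details vary across huggingface_hub versions): list the files of a
# dataset repository through this legacy filesystem.
if __name__ == "__main__":
    from huggingface_hub import HfApi

    demo_repo_info = HfApi().dataset_info("squad")
    demo_fs = HfFileSystem(repo_info=demo_repo_info)
    print(demo_fs.ls(""))  # top-level files and directories of the repository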
|
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS. Each state has exactly
    len(sequence) - index children; recursion terminates at the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
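
# Variant sketch: the same backtracking scheme, but collecting the permutations into a list
# instead of printing them.
def collect_all_permutations(sequence: list[int | str]) -> list[list[int | str]]:
    results: list[list[int | str]] = []

    def backtrack(current: list[int | str], used: list[bool]) -> None:
        if len(current) == len(sequence):
            results.append(current.copy())
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


assert len(collect_all_permutations([1, 2, 3])) == 6  # 3! orderings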
|
"""simple docstring"""
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
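
# Standalone sketch of driving PNDMScheduler outside the test mixin; random tensors stand in
# for a diffusion model's noise predictions, so the output is meaningless but shape-correct.
if __name__ == "__main__":
    demo_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 32, 32)
    for demo_t in demo_scheduler.timesteps:
        demo_model_output = torch.randn_like(demo_sample)  # placeholder for a UNet prediction
        demo_sample = demo_scheduler.step(demo_model_output, demo_t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 32, 32])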
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
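
# Instantiation sketch; the overrides here are arbitrary and only illustrate the fields above.
if __name__ == "__main__":
    demo_config = RetriBertConfig(hidden_size=256, num_hidden_layers=4, projection_dim=64)
    print(demo_config.model_type, demo_config.hidden_size, demo_config.projection_dim)  # retribert 256 64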
|
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
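
# Interactive sketch mirroring the special-tokens test above (downloads facebook/blenderbot-90M):
if __name__ == "__main__":
    demo_tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    demo_ids = demo_tok("I am a small frog.").input_ids
    # lowercased, with a space re-introduced before the final period
    print(demo_tok.decode(demo_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))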
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """A conv block bundling a convolution, batch norm, and ReLU activation."""

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False, dilation=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """Pyramid Pooling Module (PPM): pools the input at several scales and projects each scale."""

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """UperNet decode head: PPM on the deepest feature map plus an FPN over all backbone stages."""

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """Fully-convolutional auxiliary head applied to one intermediate feature map."""

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the configuration.
        Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
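
# Forward-pass sketch with randomly initialized weights (this module uses package-relative
# imports, so run the equivalent through the public API instead; defaults may vary by version):
#
#   import torch
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#
#   config = UperNetConfig(num_labels=19)          # default backbone config is a ConvNeXt
#   model = UperNetForSemanticSegmentation(config)
#   logits = model(torch.randn(1, 3, 128, 128)).logits
#   print(logits.shape)                            # (1, 19, 128, 128): per-pixel class scores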
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
    '''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
  - name: "Dataset Card for X"  # First-level markdown heading
    allow_empty: false
    allow_empty_text: true
    subsections:
      - name: "Table of Contents"
        allow_empty: false
        allow_empty_text: false
        subsections: null
      - name: "Dataset Description"
        allow_empty: false
        allow_empty_text: false
        subsections:
          - name: "Dataset Summary"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Supported Tasks and Leaderboards"
            allow_empty: true
            allow_empty_text: true
            subsections: null
          - name: Languages
            allow_empty: false
            allow_empty_text: true
            subsections: null
'''
)
CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def a_ ( __lowercase : Union[str, Any] , __lowercase : str ) -> str:
assert ReadMe.from_string(__lowercase , __lowercase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def a_ ( __lowercase : Any , __lowercase : Tuple ) -> int:
with pytest.raises(__lowercase , match=re.escape(expected_error.format(path='root' ) ) ):
_snake_case = ReadMe.from_string(__lowercase , __lowercase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors_suppressed(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors_suppressed(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 130
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
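    """Return num!, memoized via functools.lru_cache.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """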
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 130
| 1
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
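# Illustrative sketch (not part of the original script): the shards written above
# can be read back with tf.data; the shape below assumes the default --max_length of 512.
#
# def decode_fn(example):
#     feature_spec = {
#         "input_ids": tf.io.FixedLenFeature(shape=(512,), dtype=tf.int64),
#         "attention_mask": tf.io.FixedLenFeature(shape=(512,), dtype=tf.int64),
#     }
#     return tf.io.parse_single_example(example, feature_spec)
#
# ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"]).map(decode_fn)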
| 195
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
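# Usage sketch (checkpoints as listed in PRETRAINED_VOCAB_FILES_MAP above):
# tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
# tokenizer("Hello world")  # returns input_ids and token_type_ids, per model_input_names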
| 60
| 0
|
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
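# Toy smoke test (illustrative, not from the original file): with n_token=1000 and
# cutoffs=[100, 500], tokens 0-99 are scored directly by the head softmax and the
# two tail clusters are reached through the last two head logits.
#
# softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
# nll = softmax(torch.randn(2, 8, 64), labels=torch.randint(0, 1000, (2, 8)))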
| 163
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 163
| 1
|
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
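# Jump search assumes a sorted array: hop ahead in sqrt(n)-sized blocks, then scan
# linearly inside the block that may contain x, for O(sqrt(n)) comparisons overall.
# e.g. jump_search([0, 1, 3, 5, 8, 13], 5) returns 3.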
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F"""Number {x} is at index {res}""")
| 245
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
if is_tf_available():
        framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def _lowercase ( self : int ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Tuple = 2
class a__ ( tf.Module ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase__ : Optional[int] ) ->str:
"""simple docstring"""
super(UpperCAmelCase__ , self ).__init__()
SCREAMING_SNAKE_CASE : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase__ , )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : Any = [[2, 0], [1_0_2, 1_0_3]]
SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE : Dict = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} )
SCREAMING_SNAKE_CASE : Optional[int] = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
SCREAMING_SNAKE_CASE : int = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE : Tuple = serving_func(**UpperCAmelCase__ )["""sequences"""]
SCREAMING_SNAKE_CASE : List[str] = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def _lowercase ( self : Dict ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : int = 2
class a__ ( tf.Module ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
super(UpperCAmelCase__ , self ).__init__()
SCREAMING_SNAKE_CASE : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=UpperCAmelCase__ , )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : List[Any] = [[2], [1_0_2, 1_0_3]]
SCREAMING_SNAKE_CASE : List[Any] = [[1], [1, 1]]
SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={"""serving_default""": dummy_model.serving} )
SCREAMING_SNAKE_CASE : int = tf.saved_model.load(UpperCAmelCase__ ).signatures["""serving_default"""]
for input_row in range(len(UpperCAmelCase__ ) ):
SCREAMING_SNAKE_CASE : str = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE : List[str] = serving_func(**UpperCAmelCase__ )["""sequences"""]
SCREAMING_SNAKE_CASE : List[Any] = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_tensorflow_text
def _lowercase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=UpperCAmelCase__ )
class a__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , """spiece.model""" ) , """rb""" ).read() )
SCREAMING_SNAKE_CASE : Dict = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def _lowercase ( self : int , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : str ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.tokenize(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = text.pad_model_inputs(
UpperCAmelCase__ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
return self.tokenizer.detokenize(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE : Tuple = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
SCREAMING_SNAKE_CASE : str = complete_model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
keras_model.save(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 1_0,
"""temperature""": 0.7,
}
SCREAMING_SNAKE_CASE : Tuple = 1_4
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : List[Any] = """Hello, my dog is cute and"""
SCREAMING_SNAKE_CASE : Tuple = tokenizer(UpperCAmelCase__ , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Dict = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : int = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE : Dict = [6_3_8, 1_9_8]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : Dict = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _lowercase ( self : str ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
SCREAMING_SNAKE_CASE : List[Any] = """Hugging Face is a technology company based in New York and Paris."""
SCREAMING_SNAKE_CASE : Optional[int] = bart_tokenizer(UpperCAmelCase__ , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE : int = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ ).numpy()
class a__ ( UpperCAmelCase ):
"""simple docstring"""
def _lowercase ( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Dict ) ->List[str]:
"""simple docstring"""
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
SCREAMING_SNAKE_CASE : Optional[int] = bart_model.generate(UpperCAmelCase__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
class a__ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE : Tuple = bart_model.generate(UpperCAmelCase__ ).numpy()
with self.assertRaises(UpperCAmelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase__ , foo="""bar""" )
| 245
| 1
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two bit strings that differ in at most one position, marking the
    # differing position with "_"; return False if they differ in more than one.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # First take implicants that are the sole cover of some minterm (essential).
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Then greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
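# Pipeline recap: decimal_to_binary() renders each minterm as a bit string,
# check() repeatedly merges candidates into prime implicants,
# prime_implicant_chart() marks which implicant covers which minterm, and
# selection() extracts the essential prime implicants from that chart.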
| 306
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n in ascending order.

    >>> prime_factors(12)
    [2, 2, 3]
    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
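# Trial division only needs candidates up to sqrt(n): once i * i > n, any n that
# is still greater than 1 must itself be prime, hence the final append.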
| 306
| 1
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_a : Dict= TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
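# For a binary heap flattened into a list, the node at index i has its parent at
# (i - 1) // 2 and its children at 2*i + 1 and 2*i + 2; the three helpers above
# encode exactly this indexing scheme.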
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
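# Usage sketch (assuming the classes above):
# graph = GraphUndirectedWeighted[int]()
# graph.add_edge(1, 2, 3)
# graph.add_edge(2, 3, 1)
# dist, parent = prims_algo(graph)  # `parent` encodes the minimum spanning tree edges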
| 172
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27,
        num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0,
        qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0,
        attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False,
        initializer_range=0.02, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
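# e.g. MgpstrConfig() reproduces the defaults of "alibaba-damo/mgp-str-base";
# passing e.g. max_token_length=48 overrides just that one field.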
| 172
| 1
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
|
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh(x) expressed through the logistic function: tanh(x) = 2 / (1 + e^(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
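# The output is bounded in (-1, 1); e.g. tangent_hyperbolic(np.array([0.0]))
# evaluates to array([0.]).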
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    # tokenizer has no padding token, so the common padding test is skipped here
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE )
self.assertListEqual(
SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 130
|
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex: trace the UNet with a representative sample input when possible
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
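
# Invocation sketch (the file name and the model path above are placeholders,
# not part of this script): `python stable_diffusion_ipex.py --dpm --steps 20`
# runs DPMSolver with 20 inference steps under the bfloat16 autocast context above.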
| 130
| 1
|
"""simple docstring"""
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = len(lowerCamelCase__ )
while cur > 1:
# Find the maximum number in arr
lowercase__ : Optional[int] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowercase__ : Tuple = arr[mi::-1] + arr[mi + 1 : len(lowerCamelCase__ )]
# Reverse whole list
lowercase__ : List[str] = arr[cur - 1 :: -1] + arr[cur : len(lowerCamelCase__ )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
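
# Worked example (illustrative): pancake_sort([3, 1, 2])
#   cur=3: max 3 sits at index 0; flipping arr[:1] changes nothing, flipping arr[:3] gives [2, 1, 3]
#   cur=2: max 2 sits at index 0; flipping arr[:1] changes nothing, flipping arr[:2] gives [1, 2, 3]
# Each pass needs at most two reversals, so the whole sort is O(n^2).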
| 354
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Tombstone marking a bucket whose item was deleted (falsy on purpose)."""

    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add a (key, val) pair at the given bucket; fail if it is taken by another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 121
| 0
|
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 163
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=2 , _lowerCamelCase=24 , _lowerCamelCase=16 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=None , _lowerCamelCase=2 , _lowerCamelCase=2 , ):
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : Optional[int] = max_length
UpperCAmelCase__ : int = num_mel_bins
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Any = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : Any = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : List[Any] = scope
UpperCAmelCase__ : str = frequency_stride
UpperCAmelCase__ : str = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase__ : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase__ : Optional[Any] = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase__ : Dict = frequency_out_dimension * time_out_dimension
UpperCAmelCase__ : Dict = num_patches + 2
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Dict = self.get_config()
return config, input_values, labels
def snake_case__ ( self):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = ASTModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase :int = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase :List[str] = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
lowerCAmelCase :List[Any] = False
lowerCAmelCase :Any = False
lowerCAmelCase :Optional[int] = False
lowerCAmelCase :int = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
def snake_case__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""")
def snake_case__ ( self):
pass
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase , nn.Linear))
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = model_class(_lowerCamelCase)
UpperCAmelCase__ : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase)
@slow
def snake_case__ ( self):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = ASTModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
def prepare_audio():
    # the audio file is decoded on the fly
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
@cached_property
def snake_case__ ( self):
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""")
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = self.default_feature_extractor
UpperCAmelCase__ : List[str] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""").to(_lowerCamelCase)
UpperCAmelCase__ : str = self.default_feature_extractor
UpperCAmelCase__ , UpperCAmelCase__ : Dict = prepare_audio()
UpperCAmelCase__ : Dict = audio.squeeze().numpy()
UpperCAmelCase__ : Union[str, Any] = feature_extractor(_lowerCamelCase , sampling_rate=_lowerCamelCase , return_tensors="""pt""").to(_lowerCamelCase)
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Tuple = model(**_lowerCamelCase)
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , _lowerCamelCase)
UpperCAmelCase__ : Tuple = torch.tensor([-0.8760, -7.0042, -8.6602]).to(_lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4))
| 163
| 1
|
"""0-1 BFS: shortest paths in a graph whose edge weights are all 0 or 1."""

from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
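
    # Minimal usage sketch (not part of the original module): the chain of two
    # 0-weight edges beats the direct 1-weight edge, so the distance is 0.
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 0)
    graph.add_edge(0, 2, 1)
    assert graph.get_shortest_path(0, 2) == 0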
| 360
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
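
# Invocation sketch (the report id is a placeholder): `pytest tests/ --make-reports=ci_run`
# triggers pytest_terminal_summary above, which writes per-test report files via
# pytest_terminal_summary_main.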
| 219
| 0
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 306
|
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    # write num - 1 as 2**t * s with s odd
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial-divide by the primes below 1000 before falling back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Sample random `keysize`-bit numbers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
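
# Usage sketch (smaller keysize chosen only so the example runs quickly):
#   p = generate_large_prime(keysize=128)  # a 128-bit probable prime
# Five Rabin-Miller rounds bound the chance of accepting a composite by (1/4) ** 5.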
| 306
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
'''simple docstring'''
A_ : Optional[int] = OPTConfig
A_ : Union[str, Any] = {}
A_ : List[Any] = 'gelu'
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , ) -> int:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = eos_token_id
_a = pad_token_id
_a = bos_token_id
_a = embed_dim
_a = word_embed_proj_dim
_a = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_a = tf.concat([input_ids, eos_tensor] , axis=1 )
_a = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__UpperCAmelCase , **self.config_updates , )
_a = prepare_opt_inputs_dict(__UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
A_ : Any = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A_ : Optional[int] = (TFOPTForCausalLM,) if is_tf_available() else ()
A_ : List[str] = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
A_ : Union[str, Any] = False
A_ : List[Any] = False
A_ : int = False
A_ : Any = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
def _UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Tuple:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__UpperCAmelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_a = model_class(config=__UpperCAmelCase )
_a = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
_a = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__UpperCAmelCase )
_a = _get_word_embedding_weight(__UpperCAmelCase , model.get_input_embeddings() )
_a = _get_word_embedding_weight(__UpperCAmelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_a = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __UpperCAmelCase )
# check that weights remain the same after resizing
_a = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_a = False
self.assertTrue(__UpperCAmelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __UpperCAmelCase )
_a = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_a = False
self.assertTrue(__UpperCAmelCase )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
'''simple docstring'''
A_ : List[str] = 99
def _UpperCAmelCase ( self ) -> str:
_a = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_a = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_a = input_ids.shape[0]
_a = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self ) -> int:
_a = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
_a = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_a = tf.not_equal(__UpperCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
_a = model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase ).last_hidden_state
_a = (1, 11, 512)
self.assertEqual(output.shape , __UpperCAmelCase )
_a = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4e-3 ) )
_a = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
_a = xla_generate(__UpperCAmelCase , __UpperCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=4e-2 ) )
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = TFOPTForCausalLM.from_pretrained(self.path_model )
_a = GPTaTokenizer.from_pretrained(self.path_model )
_a = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_a = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_a = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_a = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-4 ) )
_a = tf.function(__UpperCAmelCase , jit_compile=__UpperCAmelCase )
_a = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-4 ) )
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
'''simple docstring'''
    @property
    def prompts(self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = '''facebook/opt-125m'''
_a = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_a = []
_a = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
_a = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
_a = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
_a = model.generate(__UpperCAmelCase , max_length=10 )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> int:
_a = '''facebook/opt-350m'''
_a = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
_a = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
_a = '''left'''
# use different length sentences to test batching
_a = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''tf''' , padding=__UpperCAmelCase )
_a = inputs['''input_ids''']
_a = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs['''attention_mask'''] )
_a = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
_a = model.generate(input_ids=__UpperCAmelCase )
_a = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
_a = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
_a = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_a = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_a = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
def _UpperCAmelCase ( self ) -> Tuple:
_a = '''facebook/opt-350m'''
_a = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_a = []
_a = GPTaTokenizer.from_pretrained(__UpperCAmelCase )
_a = TFOPTForCausalLM.from_pretrained(__UpperCAmelCase )
for prompt in self.prompts:
_a = tokenizer(__UpperCAmelCase , return_tensors='''tf''' ).input_ids
_a = model.generate(__UpperCAmelCase , max_length=10 )
_a = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 153
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
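
# Behavior sketch (illustrative): once the _LazyModule is installed in sys.modules,
# `from transformers.models.resnet import ResNetConfig` resolves immediately, while
# the torch/TF/Flax model classes are only imported on first attribute access.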
| 153
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Tuple = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
UpperCamelCase : str = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
UpperCamelCase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ["input_ids", "attention_mask"]
lowercase = MBartTokenizer
lowercase = []
lowercase = []
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = vocab_file
__UpperCamelCase = False if not self.vocab_file else True
__UpperCamelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
__UpperCamelCase = {
lang_code: self.convert_tokens_to_ids(__UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__UpperCamelCase = src_lang if src_lang is not None else 'en_XX'
__UpperCamelCase = self.convert_tokens_to_ids(self._src_lang )
__UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase = src_lang
__UpperCamelCase = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = self.convert_tokens_to_ids(__UpperCAmelCase )
__UpperCamelCase = tgt_lang_id
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "en_XX" , __UpperCAmelCase = None , __UpperCAmelCase = "ro_RO" , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = src_lang
__UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.convert_tokens_to_ids(__UpperCAmelCase )
__UpperCamelCase = []
__UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
__UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
__UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
__UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.convert_tokens_to_ids(__UpperCAmelCase )
__UpperCamelCase = []
__UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
__UpperCamelCase = self.convert_ids_to_tokens(self.prefix_tokens )
__UpperCamelCase = self.convert_ids_to_tokens(self.suffix_tokens )
__UpperCamelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
__UpperCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
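
# Minimal usage sketch (illustrative; downloads a checkpoint from the Hugging Face Hub,
# so network access is assumed):
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Per set_src_lang_special_tokens above, the encoded ids end with [eos, en_XX code].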
| 316
|
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None:
__UpperCamelCase = ""
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ):
__UpperCamelCase = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case )
return decoded
def A ( snake_case :list[int] ) -> list[str]:
__UpperCamelCase = []
for key in product(snake_case , repeat=3 ):
__UpperCamelCase = try_key(snake_case , snake_case )
if encoded is not None:
possibles.append(snake_case )
return possibles
def A ( snake_case :list[str] , snake_case :str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A ( snake_case :str = "p059_cipher.txt" ) -> int:
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' )
__UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )]
__UpperCamelCase = filter_valid_chars(snake_case )
for common_word in COMMON_WORDS:
__UpperCamelCase = filter_common_word(snake_case , snake_case )
if len(snake_case ) == 1:
break
__UpperCamelCase = possibles[0]
return sum(ord(snake_case ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
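
# Illustrative property check (hypothetical key and plaintext, not the puzzle input):
# XOR is self-inverse, so decoding with the encryption key recovers the text.
#   key = (ord("a"), ord("b"), ord("c"))
#   cipher = [ord(c) ^ k for c, k in zip("the secret text", cycle(key))]
#   assert try_key(cipher, key) == "the secret text"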
| 316
| 1
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
def __init__( self : str ,__lowerCamelCase : str ,__lowerCamelCase : Optional[int]=3 ,__lowerCamelCase : List[str]=32 ,__lowerCamelCase : Tuple=3 ,__lowerCamelCase : List[Any]=10 ,__lowerCamelCase : Optional[int]=[8, 16, 32, 64] ,__lowerCamelCase : List[Any]=[1, 1, 2, 1] ,__lowerCamelCase : Any=True ,__lowerCamelCase : int=True ,__lowerCamelCase : int="relu" ,__lowerCamelCase : Tuple=3 ,__lowerCamelCase : Union[str, Any]=None ,__lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"] ,__lowerCamelCase : Any=[2, 3, 4] ,__lowerCamelCase : List[Any]=1 ,):
'''simple docstring'''
a = parent
a = batch_size
a = image_size
a = num_channels
a = embeddings_size
a = hidden_sizes
a = depths
a = is_training
a = use_labels
a = hidden_act
a = num_labels
a = scope
a = len(__lowerCamelCase )
a = out_features
a = out_indices
a = num_groups
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] ,self.num_labels )
a = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Dict ,__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
a = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : List[str] ,__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
a = self.num_labels
a = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
a = model(__lowerCamelCase ,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also works using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
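# Illustrative usage sketch (an addition, not part of the original test file):
# it reproduces the shape assertion from create_and_check_model with the same
# toy configuration BitModelTester builds. num_groups=1 mirrors the tester;
# the larger library default may not divide these small channel widths.
import torch
from transformers import BitConfig, BitModel

toy_config = BitConfig(
    num_channels=3,
    embeddings_size=10,
    hidden_sizes=[8, 16, 32, 64],
    depths=[1, 1, 2, 1],
    num_groups=1,
)
toy_model = BitModel(toy_config).eval()
with torch.no_grad():
    toy_out = toy_model(torch.randn(3, 3, 32, 32))  # (batch_size, num_channels, image_size, image_size)
print(toy_out.last_hidden_state.shape)  # expected torch.Size([3, 64, 1, 1]): hidden_sizes[-1], image_size // 32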
| 330
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
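# A short usage sketch (an illustrative addition, not part of the module above):
# the template is a frozen dataclass, so a per-dataset column name is supplied
# at construction time; "content" below is just a hypothetical column name.
template = LanguageModeling(text_column="content")
print(template.task)            # language-modeling
print(template.column_mapping)  # {'content': 'text'}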
| 330
| 1
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Searches ``list_data`` for ``key`` from both ends at once, recursively.
    Returns the index of ``key`` if found, -1 otherwise.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    >>> search([], 1)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
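# An equivalent iterative version (a sketch added for illustration): the
# recursive form above uses roughly len(list_data) // 2 stack frames, which
# can hit CPython's default recursion limit (~1000) on long lists; the loop
# below performs the same two-ended scan without recursion.
def search_iterative(list_data: list, key: int) -> int:
    left, right = 0, len(list_data) - 1
    while left <= right:
        if list_data[left] == key:
            return left
        if list_data[right] == key:
            return right
        left, right = left + 1, right - 1
    return -1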
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
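# Quick instantiation sketch (an addition for illustration; the printed values
# assume the Swin-T-style defaults above and the helper's fallback behavior):
config = MaskFormerSwinConfig()
print(config.hidden_size)   # 768 == 96 * 2 ** (4 - 1)
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage4'] -- with both arguments None, the helper defaults to the last stage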
| 121
| 0
|
def get_set_bits_count(number: int) -> int:
    """
    Counts the set bits in a non-negative integer using Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # `number &= number - 1` clears the lowest set bit, so the loop
        # runs once per 1-bit instead of once per bit position
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
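# Trace of the Kernighan step for 25 (0b11001), added for illustration:
#   25 & 24 = 0b11001 & 0b11000 = 0b11000 (24)
#   24 & 23 = 0b11000 & 0b10111 = 0b10000 (16)
#   16 & 15 = 0b10000 & 0b01111 = 0b00000 -> 3 iterations for 3 set bits
# Quick cross-check against Python's built-in binary representation:
for n in (0, 1, 25, 2**31 - 1):
    assert get_set_bits_count(n) == bin(n).count("1")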
| 368
|
def is_pentagonal(n: int) -> bool:
    """A number ``n`` is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a whole number."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Finds the smallest difference D = P_j - P_i such that pentagonal numbers
    P_i and P_j have a pentagonal sum and difference (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
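# Why the is_pentagonal test works (an explanatory addition): inverting
# P(n) = n * (3n - 1) / 2 gives 3n^2 - n - 2P = 0, whose positive root is
# n = (1 + sqrt(1 + 24P)) / 6, so P is pentagonal exactly when that n is a
# whole number. Sanity check on the first few pentagonal and non-pentagonal values:
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35, 51, 70))
assert not any(is_pentagonal(q) for q in (2, 3, 4, 6, 7, 10, 11))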
| 306
| 0
|