"""Tests for the ONNX Stable Diffusion text-to-image pipeline."""
import tempfile
import unittest

import numpy as np

from diffusers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionPipeline,
    PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculate the date of Easter for a given year using Gauss's Easter algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
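
    # Spot check (verified by hand against the calendar, not taken from the
    # original file): Easter Sunday 2023 fell on April 9.
    assert gauss_easter(2023) == datetime(2023, 4, 9)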
"""simple docstring"""
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : Any, UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
A__ = mf_knapsack(i - 1, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
else:
A__ = max(
mf_knapsack(i - 1, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ), mf_knapsack(i - 1, lowerCamelCase_, lowerCamelCase_, j - wt[i - 1] ) + val[i - 1], )
A__ = val
return f[i][j]
def _lowerCamelCase ( UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Tuple, UpperCAmelCase_ : Any ) -> List[str]:
"""simple docstring"""
A__ = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1, n + 1 ):
for w_ in range(1, w + 1 ):
if wt[i - 1] <= w_:
A__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_] )
else:
A__ = dp[i - 1][w_]
return dp[n][w_], dp
def _lowerCamelCase ( UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : Tuple, UpperCAmelCase_ : int ) -> Any:
"""simple docstring"""
if not (isinstance(lowerCamelCase_, (list, tuple) ) and isinstance(lowerCamelCase_, (list, tuple) )):
raise ValueError(
"Both the weights and values vectors must be either lists or tuples" )
A__ = len(lowerCamelCase_ )
if num_items != len(lowerCamelCase_ ):
A__ = (
'The number of weights must be the same as the number of values.\n'
F"""But got {num_items} weights and {len(lowerCamelCase_ )} values"""
)
raise ValueError(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
if not isinstance(wt[i], lowerCamelCase_ ):
A__ = (
'All weights must be integers but got weight of '
F"""type {type(wt[i] )} at index {i}"""
)
raise TypeError(lowerCamelCase_ )
A__ = knapsack(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
A__ = set()
_construct_solution(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
return optimal_val, example_optional_set
def _lowerCamelCase ( UpperCAmelCase_ : List[Any], UpperCAmelCase_ : int, UpperCAmelCase_ : List[Any], UpperCAmelCase_ : Tuple, UpperCAmelCase_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(lowerCamelCase_, lowerCamelCase_, i - 1, lowerCamelCase_, lowerCamelCase_ )
else:
optimal_set.add(lowerCamelCase_ )
_construct_solution(lowerCamelCase_, lowerCamelCase_, i - 1, j - wt[i - 1], lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase = [3, 2, 4, 4]
UpperCamelCase = [4, 3, 2, 3]
UpperCamelCase = 4
UpperCamelCase = 6
UpperCamelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
UpperCamelCase , UpperCamelCase = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
UpperCamelCase , UpperCamelCase = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _lowerCamelCase ( UpperCAmelCase_ : Dict, UpperCAmelCase_ : int, UpperCAmelCase_ : int ) -> int:
"""simple docstring"""
A__ = 1.5
A__ = int(factor * num_class_images )
A__ = ClipClient(
url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=UpperCAmelCase_, aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""", exist_ok=UpperCAmelCase_ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
A__ = client.query(text=UpperCAmelCase_ )
if len(UpperCAmelCase_ ) >= factor * num_class_images or num_images > 1e4:
break
else:
A__ = int(factor * num_images )
A__ = ClipClient(
url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=UpperCAmelCase_, aesthetic_weight=0.1, )
A__ = 0
A__ = 0
A__ = tqdm(desc="downloading real regularization images", total=UpperCAmelCase_ )
with open(F"""{class_data_dir}/caption.txt""", "w" ) as fa, open(F"""{class_data_dir}/urls.txt""", "w" ) as fa, open(
F"""{class_data_dir}/images.txt""", "w" ) as fa:
while total < num_class_images:
A__ = class_images[count]
count += 1
try:
A__ = requests.get(images["url"] )
if img.status_code == 200:
A__ = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""", "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = argparse.ArgumentParser("", add_help=UpperCAmelCase_ )
parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=UpperCAmelCase_, type=UpperCAmelCase_ )
parser.add_argument("--class_data_dir", help="path to save images", required=UpperCAmelCase_, type=UpperCAmelCase_ )
parser.add_argument("--num_class_images", help="number of images to download", default=200, type=UpperCAmelCase_ )
return parser.parse_args()
if __name__ == "__main__":
UpperCamelCase = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
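
# Example invocation (the script name and paths are illustrative, not taken
# from the original source):
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./class_data/dog --num_class_images 200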
"""Lazy import structure for the MobileBERT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Divide a number of bytes into equally sized partitions (byte ranges)."""
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide `number_of_bytes` into the given number of partitions and return the
    1-indexed, inclusive byte range ("start-end") assigned to each partition.
    The last partition absorbs any remainder.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
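
    # Worked example, computed by hand from the function above:
    # 100 bytes over 4 partitions gives 25 bytes per partition.
    assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]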
"""`transformers-cli serve`: expose a pipeline through a REST web service."""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided
    command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding token ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids into readable text.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
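
# Usage sketch (the endpoint paths are the ones registered above; host, port,
# and payload values are illustrative):
#   transformers-cli serve --task sentiment-analysis --port 8888
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'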
"""Simulation of the BB84 quantum key distribution protocol using qiskit."""
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed=None) -> str:
    """
    Simulate the BB84 protocol and return the generated key as a bit string.
    """
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
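
# Note: both the basis/state draws and the simulator are seeded, so a fixed
# `seed` yields a reproducible key; the exact bit string depends on the
# installed qiskit version, so no particular value is asserted here.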
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Re-create the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
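
# Usage sketch ("gpt2" is the public Hub checkpoint name; the output shown is
# illustrative):
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#   tokenizer("Hello world")["input_ids"]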
"""simple docstring"""
def a_ ( lowerCamelCase = 2_0_0 ):
UpperCAmelCase__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
UpperCAmelCase__ = [0] * (pence + 1)
UpperCAmelCase__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowerCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
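    # Smaller, hand-checkable case: 2 pence can be made two ways (1+1 or 2).
    assert solution(2) == 2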
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : str = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "ctrl"
snake_case__ = ["past_key_values"]
snake_case__ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any ,lowerCamelCase__ : str=246_534 ,lowerCamelCase__ : List[str]=256 ,lowerCamelCase__ : Optional[int]=1_280 ,lowerCamelCase__ : Any=8_192 ,lowerCamelCase__ : int=48 ,lowerCamelCase__ : Optional[Any]=16 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[str]=1e-6 ,lowerCamelCase__ : List[str]=0.0_2 ,lowerCamelCase__ : Tuple=True ,**lowerCamelCase__ : Optional[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = n_positions
UpperCAmelCase__ = n_embd
UpperCAmelCase__ = n_layer
UpperCAmelCase__ = n_head
UpperCAmelCase__ = dff
UpperCAmelCase__ = resid_pdrop
UpperCAmelCase__ = embd_pdrop
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = use_cache
super().__init__(**lowerCamelCase__ )
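
# Usage sketch: instantiate with the defaults above, or override individual
# hyperparameters (the reduced values below are illustrative, e.g. for tests):
#   config = CTRLConfig()
#   tiny_config = CTRLConfig(n_layer=2, n_head=4)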
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__A : Dict = logging.get_logger('''transformers.models.encodec''')
__A : str = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path down to the module/parameter the key refers to.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        # IGNORE_KEYS is assumed to be defined alongside the MAPPING_* tables
        # earlier in this file.
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
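# How the "*" substitution above recovers a layer index (hypothetical name, for
# illustration only): with name = "encoder.model.3.conv.conv.weight" and
# key = "conv", name.split(key)[0] is "encoder.model.3.", which splits on "."
# into ["encoder", "model", "3", ""], so [-2] picks "3"; a mapped_key like
# "encoder.layers.*.conv" then becomes "encoder.layers.3.conv".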
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__A : str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
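# Example invocation (assuming this script is saved as
# convert_encodec_checkpoint_to_pytorch.py; the paths below are placeholders):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path /path/to/encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf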
| 231
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
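# A minimal usage sketch (the model id is the public albert-base-v2 checkpoint;
# the concrete ids shown are illustrative, not guaranteed):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
#   # -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]
#   tokenizer.create_token_type_ids_from_sequences([10, 11], [20, 21])
#   # -> [0, 0, 0, 0, 1, 1, 1]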
| 231
| 1
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase__ = "\\n Text data.\n Second line of data."
lowercase__ = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
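# Sketch of what the test above exercises: with extract_compressed_file=True,
# cached_path decompresses into an "extracted" directory under the cache and
# returns the extracted file's path (paths are placeholders):
#   download_config = DownloadConfig(cache_dir="/tmp/cache", extract_compressed_file=True)
#   extracted = cached_path("data.txt.gz", download_config=download_config)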
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 712
|
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch,
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
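# These checks are gated behind @slow; in the transformers test suite such tests
# are typically enabled via the RUN_SLOW environment variable, e.g.
#   RUN_SLOW=1 pytest -k bleu path/to/this_test_file.py
# (the exact file path depends on the repository layout).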
| 695
| 0
|
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # the output is deterministic: scheduler_output - scheduler_output is zero,
        # so the result is a tensor of ones with the same shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
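# A minimal usage sketch, assuming a small randomly initialised UNet and a DDPM
# scheduler (standard diffusers components; the shapes are illustrative):
#   from diffusers import UNet2DModel, DDPMScheduler
#   unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#   scheduler = DDPMScheduler()
#   pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
#   out = pipe()  # tensor of ones with shape (1, 3, 32, 32)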
| 350
|
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place using odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for pass_number in range(arr_size):
        # alternate between even- and odd-indexed neighbour pairs
        for i in range(pass_number % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
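# One pass over [3, 1, 2]: the even phase (i = 0) swaps 3 and 1 -> [1, 3, 2];
# the following odd phase compares indices 1 and 2 and swaps -> [1, 2, 3].
# n passes always suffice for n elements.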
if __name__ == "__main__":
_lowercase = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 157
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up one-to-one, so copy by position
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
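# Example invocation (assuming this script is saved as
# convert_levit_timm_to_pytorch.py; the path is a placeholder):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path ./levit-dump-folder --no-push_to_hub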
| 491
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
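# `chunks` (imported from utils above) is assumed to split a list into
# consecutive batch_size-sized pieces, e.g. chunks(["a", "b", "c"], 2) yielding
# ["a", "b"] then ["c"], so each tqdm step in generate_summaries_or_translations
# processes one batch of examples.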
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 491
| 1
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup, i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step / num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
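# A minimal usage sketch (the step counts are illustrative and `model` is
# assumed to be an already-built tf.keras model):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=500,
#       weight_decay_rate=0.01,
#   )
#   model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy")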
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied before each parameter update."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps so they can be applied at once."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
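# A minimal accumulation-loop sketch (assumes `model`, `optimizer`, `dataset`,
# `compute_loss` and `accumulation_steps` already exist; illustrative only):
#   accumulator = GradientAccumulator()
#   for step, batch in enumerate(dataset):
#       with tf.GradientTape() as tape:
#           loss = compute_loss(model, batch)
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if (step + 1) % accumulation_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()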
| 199
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into
            # a single batch to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
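    # Shape sketch for the classifier-free-guidance batch built above (sizes are
    # illustrative): with batch_size 1 and num_images_per_prompt 2, image_embeds
    # is (2, seq_len, dim) before the concat and (4, seq_len, dim) after, the
    # first half being zeros; `noise_pred.chunk(2)` in __call__ undoes the split.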
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                "`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or"
                f" `List[torch.Tensor]` but is {type(image)}"
            )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 501
| 0
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
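# A worked chain: 44 -> 4**2 + 4**2 = 32 -> 13 -> 10 -> 1, so 44 settles at 1,
# while 85 -> 8**2 + 5**2 = 89 settles at 89; every starting number eventually
# reaches one of those two endpoints.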
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Return True if the chain starting at `number` ends at 1, False if at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # every multiple of 10 of `number` has the same digit-square chain result
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` produce chains that end at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
| 311
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__: Optional[Any] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__: str = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
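# With the _LazyModule registration above, importing this package stays cheap:
# `from transformers.models.plbart import PLBartModel` only triggers the heavy
# torch-backed import on first attribute access, while at import time just the
# names listed in _import_structure are registered.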
| 311
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
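# One concrete pair produced above (stage 0, layer 0):
#   ("patch_embed.backbone.stages.0.blocks.0.conv1.weight",
#    "vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.0.layers.0.conv1.weight")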
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm hybrid ViT's weights to our ViT structure."""
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
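# For reference, a typical invocation (the script filename is an assumption):
#
#     python convert_vit_hybrid_timm_to_pytorch.py \
#         --vit_name vit_base_r50_s16_384 \
#         --pytorch_dump_folder_path ./vit-hybrid-base-bit-384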
| 188
|
'''simple docstring'''
class EditDistance:
    """
    Dynamic-programming solutions (top-down memoized and bottom-up tabulated)
    for the minimum edit distance between two strings.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
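# As a quick sanity check of both implementations, the classic example
# "kitten" -> "sitting" needs exactly three edits (substitute k->s,
# substitute e->i, insert g):
#
#     solver = EditDistance()
#     assert solver.min_dist_top_down("kitten", "sitting") == 3
#     assert solver.min_dist_bottom_up("kitten", "sitting") == 3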
| 404
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of content by removing duplicate entries and
    sorting the remaining entries alphabetically by title.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add the keys that appear only once
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f'{doc_list} has two \'overview\' docs which is not allowed.')

    overview_doc.extend(new_doc)
    return overview_doc
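# A small illustration of what clean_doc_toc does (entry names are made up):
# the duplicated "ddim" entry is collapsed, non-duplicates are kept, entries
# are sorted by title, and "overview" is moved to the front.
#
#     toc = [
#         {"local": "ddim", "title": "DDIM"},
#         {"local": "overview", "title": "Overview"},
#         {"local": "ddim", "title": "DDIM"},
#         {"local": "ancestral", "title": "Ancestral"},
#     ]
#     clean_doc_toc(toc)
#     # -> [{"local": "overview", "title": "Overview"},
#     #     {"local": "ancestral", "title": "Ancestral"},
#     #     {"local": "ddim", "title": "DDIM"}]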
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']

    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.')
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
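# For reference, assuming this lives at utils/check_doc_toc.py in the diffusers
# repository, it would be run as:
#
#     python utils/check_doc_toc.py                      # check only
#     python utils/check_doc_toc.py --fix_and_overwrite  # fix in place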
| 432
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase : Any = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError('You must include at least one label and at least one sequence.')
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith('entail' ):
return ind
return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers that do not support padding
            logger.error(
                'Tokenizer does not support padding, which is necessary for zero-shot classification; attempting '
                'to use `pad_token=eos_token`')
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # The tokenizer may complain that we are asking to truncate to a
                # value the input never reaches. In that case we do not want to
                # truncate at all, and catching the exception seems to be the only
                # practical way to detect this.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get('multi_class', None) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.')
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'])
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(F'''Unable to understand extra arguments {args}''')
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            'candidate_label': candidate_label,
            'sequence': sequence,
            'is_last': inputs['is_last'],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs['candidate_label'] for outputs in model_outputs]
        sequences = [outputs['sequence'] for outputs in model_outputs]
        logits = np.concatenate([output['logits'].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
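# A hedged numpy sketch of the multi-label scoring performed in postprocess
# above: each candidate label is scored independently by a softmax over its
# (contradiction, entailment) logits. The logit values are made up.
#
#     import numpy as np
#     # one sequence, two labels, NLI logits (contradiction, neutral, entailment)
#     logits = np.array([[[2.0, 0.1, 0.5], [0.2, 0.3, 3.0]]])
#     entail_contr = logits[..., [0, 2]]
#     scores = np.exp(entail_contr) / np.exp(entail_contr).sum(-1, keepdims=True)
#     scores[..., 1]  # per-label P(entailment), roughly [[0.18, 0.94]]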
| 432
| 1
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__A = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''')
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
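# The threshold-and-black-out pattern used twice above, reduced to a standalone
# sketch (the scores and the 0.5 threshold are illustrative):
#
#     import numpy as np
#     scores = np.array([0.2, 0.9, 0.4])
#     images = [np.ones((2, 2, 3)) for _ in scores]
#     for idx, flagged in enumerate((scores > 0.5).tolist()):
#         if flagged:
#             images[idx] = np.zeros(images[idx].shape)  # black out flagged image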
| 484
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: List[str] , __A: List[str] , __A: List[str]=7 , __A: Tuple=3 , __A: Optional[int]=30 , __A: Optional[Any]=4_00 , __A: int=True , __A: str=None , __A: int=True , __A: Any=[0.5, 0.5, 0.5] , __A: Dict=[0.5, 0.5, 0.5] , __A: Dict=True , __A: str=1 / 2_55 , __A: Dict=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def __A ( self: Optional[Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __A ( self: Any , __A: Optional[Any] , __A: int=False ) -> List[str]:
if not batched:
_A = image_inputs[0]
if isinstance(__A , Image.Image ):
_A ,_A = image.size
else:
_A ,_A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['''shortest_edge'''] * h / w )
_A = self.size['''shortest_edge''']
elif w > h:
_A = self.size['''shortest_edge''']
_A = int(self.size['''shortest_edge'''] * w / h )
else:
_A = self.size['''shortest_edge''']
_A = self.size['''shortest_edge''']
else:
_A = []
for image in image_inputs:
_A ,_A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(__A , key=lambda __A : item[0] )[0]
_A = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
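# The shortest-edge resize arithmetic above, as a standalone sketch that
# ignores the longest_edge cap (a simplifying assumption); the expected
# 800x1066 output for the 480x640 COCO image matches the slow tests below.
#
#     def shortest_edge_resize(h, w, shortest_edge=800):
#         if w < h:
#             return int(shortest_edge * h / w), shortest_edge
#         if w > h:
#             return shortest_edge, int(shortest_edge * w / h)
#         return shortest_edge, shortest_edge
#
#     assert shortest_edge_resize(480, 640) == (800, 1066)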
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = DeformableDetrImageProcessor if is_vision_available() else None
def __A ( self: List[str] ) -> List[str]:
_A = DeformableDetrImageProcessingTester(self )
@property
def __A ( self: Tuple ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self: Any ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''do_rescale''' ) )
self.assertTrue(hasattr(__A , '''do_pad''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
def __A ( self: Tuple ) -> Optional[Any]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __A )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __A )
def __A ( self: Dict ) -> Any:
pass
def __A ( self: str ) -> List[str]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A )
_A = image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self: str ) -> Tuple:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__A , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self: int ) -> Any:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__A , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __A ( self: Optional[Any] ) -> Tuple:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__A , annotations=__A , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
_A = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
_A = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
_A = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
@slow
def __A ( self: Dict ) -> Optional[int]:
# prepare image, target and masks_path
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_A = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
_A = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
_A = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
_A = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify masks
_A = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __A )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
| 484
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCAmelCase__: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
UpperCAmelCase__: ClassVar[Features] = Features({'''labels''': ClassLabel} )
UpperCAmelCase__: str = "text"
UpperCAmelCase__: str = "labels"
def __A ( self , A__ ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , A__ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
A__ : Optional[Any] = copy.deepcopy(self )
A__ : Tuple = self.label_schema.copy()
A__ : List[Any] = features[self.label_column]
A__ : str = label_schema
return task_template
@property
def __A ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
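# A hedged sketch of how such a task template is used with the datasets
# library (TextClassification and align_with_features mirror the code above;
# the column names are assumptions):
#
#     from datasets import ClassLabel, Features, Value
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification(text_column="text", label_column="labels")
#     task = task.align_with_features(features)  # copies the concrete ClassLabel into label_schema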
| 64
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders fitting y = a * x + b with some noise."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs`, returning one random number per epoch."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
        # Save 11 states; with total_limit=2, only the last two checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
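# The save/load round trip these tests exercise, reduced to a minimal,
# self-contained sketch (the checkpoint path is made up):
#
#     import torch
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model = torch.nn.Linear(1, 1)
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state("/tmp/ckpt")  # writes model, optimizer and RNG states
#     accelerator.load_state("/tmp/ckpt")  # restores them so training resumes exactly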
| 64
| 1
|
def mean_absolute_deviation(nums: list) -> float:
    """
    Return the mean absolute deviation of the numbers in ``nums``.

    >>> mean_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__(self: Optional[int] , __UpperCAmelCase: str , __UpperCAmelCase: List[str]=13 , __UpperCAmelCase: Any=7 , __UpperCAmelCase: List[str]=True , __UpperCAmelCase: Optional[int]=True , __UpperCAmelCase: Dict=True , __UpperCAmelCase: Optional[Any]=True , __UpperCAmelCase: Optional[int]=99 , __UpperCAmelCase: Optional[Any]=32 , __UpperCAmelCase: int=5 , __UpperCAmelCase: Dict=4 , __UpperCAmelCase: Optional[int]=37 , __UpperCAmelCase: int="gelu" , __UpperCAmelCase: Tuple=0.1 , __UpperCAmelCase: Any=0.1 , __UpperCAmelCase: Union[str, Any]=512 , __UpperCAmelCase: Optional[Any]=16 , __UpperCAmelCase: List[Any]=2 , __UpperCAmelCase: str=0.02 , __UpperCAmelCase: int=4 , ) -> str:
'''simple docstring'''
__a : Tuple = parent
__a : int = batch_size
__a : Optional[int] = seq_length
__a : List[Any] = is_training
__a : Tuple = use_attention_mask
__a : Optional[int] = use_token_type_ids
__a : Tuple = use_labels
__a : str = vocab_size
__a : Union[str, Any] = hidden_size
__a : List[str] = num_hidden_layers
__a : Optional[int] = num_attention_heads
__a : Any = intermediate_size
__a : Any = hidden_act
__a : List[str] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : Optional[int] = type_vocab_size
__a : Tuple = type_sequence_label_size
__a : List[Any] = initializer_range
__a : int = num_choices
def UpperCAmelCase__ (self: Tuple ) -> List[Any]:
'''simple docstring'''
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Dict = None
if self.use_attention_mask:
__a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Dict = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ (self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__a : Union[str, Any] = self.prepare_config_and_inputs()
__a , __a , __a , __a : Dict = config_and_inputs
__a : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCAmelCase__ (self: Dict ) -> str:
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a , __a : Tuple = config_and_inputs
__a : int = True
__a : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case_ ( __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = True
snake_case__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ (self: Dict ) -> Union[str, Any]:
'''simple docstring'''
__a : Tuple = FlaxRobertaModelTester(self )
@slow
def UpperCAmelCase__ (self: Optional[Any] ) -> List[Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained("roberta-base" , from_pt=__UpperCAmelCase )
__a : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 351
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __a ( lowerCAmelCase__ : str ):
if isinstance(lowerCAmelCase__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class lowerCAmelCase__ :
"""simple docstring"""
def __lowerCAmelCase ( self : Tuple , A__ : Optional[int] , A__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[int] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[int] , A__ : Dict=None , **A__ : str ) -> Dict:
'''simple docstring'''
a__ : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(A__ , A__ )
a__ : int = TFVisionTextDualEncoderModel(A__ )
a__ : Any = model(input_ids=A__ , pixel_values=A__ , attention_mask=A__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def __lowerCAmelCase ( self : Optional[Any] , A__ : Union[str, Any] , A__ : Dict , A__ : Tuple , A__ : int , A__ : Union[str, Any]=None , **A__ : Tuple ) -> List[Any]:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_vision_text_model(A__ , A__ )
a__ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=A__ , text_model=A__ )
a__ : str = model(input_ids=A__ , pixel_values=A__ , attention_mask=A__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase ( self : Optional[Any] , A__ : Dict , A__ : Tuple , A__ : str , A__ : Optional[int] , A__ : List[Any]=None , **A__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_vision_text_model(A__ , A__ )
a__ : Tuple = {'''vision_model''': vision_model, '''text_model''': text_model}
a__ : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**A__ )
a__ : Any = model(input_ids=A__ , pixel_values=A__ , attention_mask=A__ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase ( self : Optional[Any] , A__ : Tuple , A__ : List[Any] , A__ : Optional[Any] , A__ : str , A__ : Dict=None , **A__ : Union[str, Any] ) -> str:
'''simple docstring'''
a__ , a__ : Tuple = self.get_vision_text_model(A__ , A__ )
a__ : int = TFVisionTextDualEncoderModel(vision_model=A__ , text_model=A__ )
a__ : Any = model(input_ids=A__ , pixel_values=A__ , attention_mask=A__ )
a__ : Dict = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A__ )
a__ : List[str] = TFVisionTextDualEncoderModel.from_pretrained(A__ )
a__ : Dict = model(input_ids=A__ , pixel_values=A__ , attention_mask=A__ )
a__ : Optional[int] = after_output[0].numpy()
a__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A__ , 1E-5 )
def __lowerCAmelCase ( self : List[Any] , A__ : int , A__ : Optional[int] , A__ : List[Any] , A__ : Union[str, Any] , A__ : Optional[int]=None , **A__ : int ) -> Tuple:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_vision_text_model(A__ , A__ )
a__ : Dict = TFVisionTextDualEncoderModel(vision_model=A__ , text_model=A__ )
a__ : Any = model(
input_ids=A__ , pixel_values=A__ , attention_mask=A__ , output_attentions=A__ )
a__ : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(A__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[Any] = to_atuple(vision_model.config.image_size )
a__ : List[Any] = to_atuple(vision_model.config.patch_size )
a__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a__ : Tuple = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(A__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCAmelCase ( self : Optional[Any] , A__ : np.ndarray , A__ : np.ndarray , A__ : float ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = np.abs((a - b) ).max()
self.assertLessEqual(A__ , A__ , F'Difference between torch and flax is {diff} (>= {tol}).' )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**A__ )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A__ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A__ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_save_load(**A__ )
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A__ )
@slow
def __lowerCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_pretrained_model_and_inputs()
a__ : str = model_a(**A__ )
a__ : List[Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A__ )
a__ : Any = TFVisionTextDualEncoderModel.from_pretrained(A__ )
a__ : List[Any] = model_a(**A__ )
a__ : Any = after_outputs[0].numpy()
a__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A__ , 1E-5 )
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' )
a__ : Optional[int] = 1_3
a__ : Dict = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
a__ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
a__ : Tuple = random_attention_mask([batch_size, 4] )
a__ : List[str] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowerCAmelCase ( self : int , A__ : Any , A__ : Union[str, Any] ) -> int:
'''simple docstring'''
a__ : Any = TFViTModel(A__ , name='''vision_model''' )
a__ : str = TFBertModel(A__ , name='''text_model''' )
return vision_model, text_model
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
a__ : Any = TFViTModelTester(self )
a__ : List[str] = TFBertModelTester(self )
a__ : int = vit_model_tester.prepare_config_and_inputs()
a__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
a__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' )
a__ : Any = 1_3
a__ : List[str] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
a__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
a__ : Optional[Any] = random_attention_mask([batch_size, 4] )
a__ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def __lowerCAmelCase ( self : Any , A__ : List[str] , A__ : List[str] , A__ : int , A__ : List[Any] , A__ : Any=None , **A__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ , a__ : List[str] = self.get_vision_text_model(A__ , A__ )
a__ : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=A__ , text_model=A__ )
a__ : Any = model(
input_ids=A__ , pixel_values=A__ , attention_mask=A__ , output_attentions=A__ )
a__ : List[Any] = output.vision_model_output.attentions
self.assertEqual(len(A__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
a__ : List[str] = to_atuple(vision_model.config.image_size )
a__ : Any = to_atuple(vision_model.config.patch_size )
a__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a__ : str = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a__ : Dict = output.text_model_output.attentions
self.assertEqual(len(A__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCAmelCase ( self : List[Any] , A__ : Optional[int] , A__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] = TFDeiTModel(A__ , name='''vision_model''' )
a__ : Optional[int] = TFRobertaModel(A__ , name='''text_model''' )
return vision_model, text_model
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = TFDeiTModelTester(self )
a__ : Any = TFRobertaModelTester(self )
a__ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs()
a__ : Tuple = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf', 'hf-internal-testing/tiny-random-bert')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name='vision_model')
        text_model = TFBertModel(text_config, name='text_model')
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian', logit_scale_init_value=1.0, from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]))

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
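# Illustrative helper (not part of the test above): for CLIP-style dual
# encoders, the text-to-image logits are the transpose of the image-to-text
# logits, which is why the two shape assertions mirror each other.
def _illustrate_logit_transpose():
    logits_per_image = np.array([[1.2284727, 0.3104122]])  # (num_images, num_texts)
    logits_per_text = logits_per_image.T                   # (num_texts, num_images)
    assert logits_per_text.shape == (2, 1)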
| 340
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f'Saving tokenizer to {pytorch_dump_folder_path}')
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['#'])[0]

    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop('entity_predictions.decoder.weight')
    state_dict.pop('lm_head.decoder.weight')
    state_dict.pop('lm_head.decoder.bias')
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head') or key.startswith('entity_predictions')):
            state_dict_for_hugging_face[f'luke.{key}'] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}')
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f'Unexpected missing_keys: {missing_keys}')

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}')
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>'))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:')][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
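# A minimal standalone sketch (not part of the conversion itself) of the
# row-append pattern used above for [MASK2] and the <ent>/<ent2> tokens:
# new rows are initialized from existing ones and concatenated onto the
# embedding matrix, growing the vocabulary by one row per new token.
def _illustrate_embedding_extension():
    emb = torch.randn(10, 4)              # (vocab_size, hidden_size)
    new_row = emb[3].unsqueeze(0)         # copy an existing row as the init
    extended = torch.cat([emb, new_row])  # vocabulary grows by one
    assert extended.shape == (11, 4)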
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'{language}:{entity_name}'] = entity_id
    return new_mapping
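# Illustrative input/output for load_original_entity_vocab (hypothetical
# entries; the real entity_vocab.jsonl ships with the original checkpoint).
# Two JSON lines such as
#   {"id": 0, "entities": [["[MASK]", "en"]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
# would be mapped to {"[MASK]": 0, "en:Japan": 1, "ja:日本": 1}.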
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 340
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
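# Standalone shape sketch (illustrative, independent of any checkpoint): the
# fused qkv bias concatenates the q bias, a zero block for k (the original
# ViT attention has no k bias), and the v bias, giving a vector of length
# 3 * hidden_size.
def _illustrate_qkv_bias():
    q_bias = torch.ones(4)
    v_bias = torch.full((4,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias), v_bias))
    assert qkv_bias.shape == (12,)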
def get_blip2_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict()
    else:
        raise ValueError('Model name not supported')

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left')
    qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left')
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            'huggyllama/llama-7b', truncation_side='left', bos_token='</s>', unk_token='</s>')
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
        'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
        'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
        'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    lavis_device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
    hf_model_device = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device)
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "llm_proj" in key:
            key = key.replace('llm_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('llm_model'):
            key = key.replace('llm_model', 'language_model')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = 'What is unusual about this image?'

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer)
    inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}).logits
            label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if 'vicuna' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print('Looks ok!')

    print('Generating with original model...')
    original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...')
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9,
        repetition_penalty=1.5, length_penalty=1.0, temperature=1)
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('Original generation:', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f'Salesforce/{model_name}')
        hf_model.push_to_hub(f'Salesforce/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        'instructblip-vicuna-7b',
        'instructblip-vicuna-13b',
        'instructblip-flan-t5-xl',
        'instructblip-flan-t5-xxl',
    ]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 105
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = 'https://openaipublic.azureedge.net/jukebox/models/'
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')

    return key
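# Example (illustrative): replace_key('prior.x_out.weight') returns
# 'prior.fc_proj_out.weight', and a VQ-VAE codebook key ending in '.k' is
# renamed to end in '.codebook'.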
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(f'failed converting {original_key} to {key}, does not match')
        # handle missmatched shape
        elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
            val = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}'):
            r = requests.get(f'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=True)
            open(f'{pytorch_dump_folder_path}/{file.split("/")[-1]}', 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/")[-1]}')['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else f'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f'{pytorch_dump_folder_path}/mapping.json', 'w') as txtfile:
        json.dump(mapping, txtfile)

    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
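# Note (illustrative): the downloaded checkpoints are ordered
# [vqvae, prior_level_0, prior_level_1, prior_level_2], while model.priors[0]
# holds the top-level prior, hence the reversed weight_dict[2 - i] indexing
# in the loading loop above.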
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 105
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True],
            hidden_act='gelu', num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1,
            drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
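# Shape sketch (illustrative): with image_size=224 the patch embedding reduces
# the spatial size by 4x to 56, and each later stage halves it again, so the
# four stages yield 56, 28, 14 and 7; the final hidden state is therefore
# (batch_size, embed_dims[-1], 7, 7), which create_and_check_model asserts.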
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False, hidden_size=37,
            num_attention_heads=12, num_hidden_layers=12)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='SwiftFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='SwiftFormer does not output attentions')
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 137
|
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
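# Worked example: for array = [1, 4, 2, 10, 2, 3, 1, 0, 20] and k = 4 the
# four-element windows sum to 17, 18, 17, 16, 6 and 24, so the function
# returns 24. Each loop step updates the running window sum in O(1) by
# subtracting the element that leaves and adding the element that enters.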
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 100)  # keep k <= len(array) so the demo call below cannot raise
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 137
| 1
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
    def __init__(
        self, segmentation_model, segmentation_processor, vae, text_encoder, tokenizer, unet, scheduler,
        safety_checker, feature_extractor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor)
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = 7.5 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , **SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
UpperCamelCase = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
UpperCamelCase = self.segmentation_model(**SCREAMING_SNAKE_CASE )
UpperCamelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCamelCase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 606
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
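if __name__ == "__main__":
    # Hedged stand-alone sketch mirroring the integration test above; the
    # checkpoint name comes from the test, the box values are illustrative.
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
    with torch.no_grad():
        out = model(input_ids=input_ids, bbox=bbox)
    print(out.last_hidden_state.shape)  # torch.Size([1, 2, 768])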
| 606
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences; each sample is a
    (token_ids, length) pair."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self, index):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return len(self.lengths )
    def check(self):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        """
        Sequences that are too long are split by chunk of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """
        Too short sequences are simply removed. This could be tuned.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """
        Remove sequences with a (too) high level of unknown tokens.
        """
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """
        Do the padding and transform into torch.tensor.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
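if __name__ == "__main__":
    # Hedged usage sketch: `params` is a stand-in namespace (the real training
    # script builds it from argparse); shows how batch_sequences plugs into a DataLoader.
    from types import SimpleNamespace
    from torch.utils.data import DataLoader

    params = SimpleNamespace(
        mlm=True,
        max_model_input_size=128,
        is_master=True,
        special_tok_ids={"cls_token": 0, "sep_token": 1, "pad_token": 2, "unk_token": 3},
    )
    data = [
        np.array([0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 1]),
        np.array([0, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 1]),
    ]
    dataset = LmSeqsDataset(params=params, data=data)
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
    token_ids, lengths = next(iter(loader))  # padded (bs, max_seq_len_) tensor and lengths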
| 716
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
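    # Illustrative sanity check: bitonic sort assumes a power-of-two length input.
    sample = [12, 42, -21, 1, 2, 3, 98, 34]  # len(sample) == 8
    bitonic_sort(sample, 0, len(sample), 1)
    assert sample == sorted(sample)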
| 404
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then
    # check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
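if __name__ == "__main__":
    # Hedged usage sketch: these helpers are normally invoked internally by
    # `from_pretrained` when a quantization config is passed; the model id is illustrative.
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(load_in_8bit=True)
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m", quantization_config=quant_config, device_map="auto"
    )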
| 456
|
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
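    # Illustrative sanity check of odd-even (brick) sort against sorted():
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]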
| 456
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
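if __name__ == "__main__":
    # Minimal sketch: instantiate the config and a model from it (the model class
    # is assumed to be importable from the library as usual).
    from transformers import XLNetModel

    config = XLNetConfig(d_model=1024, n_layer=24, n_head=16)
    model = XLNetModel(config)
    print(config.hidden_size)  # 1024, resolved through the attribute_map alias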
| 444
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
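# Illustrative note (assuming standard _LazyModule behavior): with the lazy
# structure above, heavy submodules are only imported on first attribute access:
#
#   from transformers import BigBirdPegasusForConditionalGeneration  # triggers the lazy import
#   model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv")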
| 444
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
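# Illustrative invocation (paths are placeholders), equivalent to:
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --not_finetuned
#
# The script file name here is an assumption based on similar conversion scripts.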
| 144
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
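if __name__ == "__main__":
    # Illustrative: building a sequence pair with the fast tokenizer
    # (checkpoint name taken from the pretrained map above).
    tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
    encoded = tokenizer("first sequence", "second sequence")
    # -> [CLS] seq1 [SEP] seq2 [SEP], with token_type_ids of 0s then 1s
    print(encoded["input_ids"])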
| 144
| 1
|
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
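    # Worked check (illustrative): 4150 is one of the numbers counted above,
    # since 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.
    assert digits_fifth_powers_sum(4150) == 4150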
| 574
|
'''simple docstring'''
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Depth-first search over the subset tree, pruning branches whose running
    # sum overshoots max_sum or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
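# Worked example (illustrative): for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9,
# the subsets found are [3, 4, 2] and [4, 5], so the line above prints:
#   [3, 4, 2] [4, 5]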
| 574
| 1
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
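if __name__ == "__main__":
    # Illustrative stand-alone sketch: extracting log-mel features from one second
    # of 16 kHz audio, matching the (1, 80, 3000) shape asserted above.
    feature_extractor = WhisperFeatureExtractor()
    waveform = np.zeros(16000, dtype=np.float32)
    features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np").input_features
    print(features.shape)  # (1, 80, 3000): 80 mel bins, 30 s of padded frames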
| 44
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
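    # Worked check (illustrative): at redshift 0 the density terms sum to one,
    # so the function returns the Hubble constant itself.
    assert abs(hubble_parameter(68.3, 1e-4, matter_density, 1 - matter_density, 0) - 68.3) < 1e-9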
| 568
| 0
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
if isinstance(lowerCamelCase__ , torch.nn.Module ):
__snake_case : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
__snake_case : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__snake_case : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
__snake_case : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
__snake_case : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
__snake_case : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
__snake_case : Tuple = kwargs["min_value"]
__snake_case : Optional[Any] = list(lowerCamelCase__ )
__snake_case : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
__snake_case : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
__snake_case : Union[str, Any] = None
__snake_case : int = decay
__snake_case : Any = min_decay
__snake_case : Optional[int] = update_after_step
__snake_case : str = use_ema_warmup
__snake_case : Union[str, Any] = inv_gamma
__snake_case : Union[str, Any] = power
__snake_case : List[str] = 0
__snake_case : List[str] = None # set in `step()`
__snake_case : Optional[int] = model_cls
__snake_case : Union[str, Any] = model_config
@classmethod
def __snake_case ( cls : int , lowerCamelCase : Tuple , lowerCamelCase : int ) -> "EMAModel":
__snake_case : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
__snake_case : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
__snake_case : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def __snake_case ( self : Optional[int] , lowerCamelCase : int ) -> Dict:
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
__snake_case : int = self.model_cls.from_config(self.model_config )
__snake_case : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def __snake_case ( self : List[str] , lowerCamelCase : int ) -> float:
__snake_case : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__snake_case : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__snake_case : Any = (1 + step) / (10 + step)
__snake_case : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
__snake_case : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def __snake_case ( self : str , lowerCamelCase : Iterable[torch.nn.Parameter] ) -> Dict:
if isinstance(lowerCamelCase__ , torch.nn.Module ):
__snake_case : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
__snake_case : Any = parameters.parameters()
__snake_case : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__snake_case : Tuple = self.get_decay(self.optimization_step )
__snake_case : Any = decay
__snake_case : Optional[Any] = 1 - decay
__snake_case : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__snake_case : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def __snake_case ( self : Optional[int] , lowerCamelCase : Iterable[torch.nn.Parameter] ) -> None:
__snake_case : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def __snake_case ( self : int , lowerCamelCase : Dict=None , lowerCamelCase : Optional[int]=None ) -> None:
__snake_case : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def __snake_case ( self : List[Any] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __snake_case ( self : Optional[int] , lowerCamelCase : Iterable[torch.nn.Parameter] ) -> None:
__snake_case : Tuple = [param.detach().cpu().clone() for param in parameters]
def __snake_case ( self : List[str] , lowerCamelCase : Iterable[torch.nn.Parameter] ) -> None:
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
__snake_case : int = None
def __snake_case ( self : Union[str, Any] , lowerCamelCase : dict ) -> None:
__snake_case : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
__snake_case : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
__snake_case : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
__snake_case : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
__snake_case : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
__snake_case : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
__snake_case : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
__snake_case : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
__snake_case : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
__snake_case : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first n
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())


if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class a :
UpperCamelCase : CommonSchedulerState
# setable values
UpperCamelCase : jnp.ndarray
UpperCamelCase : jnp.ndarray
UpperCamelCase : Optional[int] = None
@classmethod
def __snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
return cls(common=UpperCamelCase_ , init_noise_sigma=UpperCamelCase_ , timesteps=UpperCamelCase_ )
@dataclass
class a ( lowercase ):
UpperCamelCase : DDPMSchedulerState
class a ( lowercase , lowercase ):
UpperCamelCase : List[str] = [e.name for e in FlaxKarrasDiffusionSchedulers]
UpperCamelCase : jnp.dtype
@property
def __snake_case ( self ):
return True
@register_to_config
def __init__( self , UpperCamelCase_ = 1_000 , UpperCamelCase_ = 0.0001 , UpperCamelCase_ = 0.02 , UpperCamelCase_ = "linear" , UpperCamelCase_ = None , UpperCamelCase_ = "fixed_small" , UpperCamelCase_ = True , UpperCamelCase_ = "epsilon" , UpperCamelCase_ = jnp.floataa , ):
UpperCAmelCase__ : Dict = dtype
def __snake_case ( self , UpperCamelCase_ = None ):
if common is None:
UpperCAmelCase__ : Union[str, Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Optional[Any] = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase_ , init_noise_sigma=UpperCamelCase_ , timesteps=UpperCamelCase_ , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None ):
return sample
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = () ):
UpperCAmelCase__ : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : int = (jnp.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ):
UpperCAmelCase__ : Optional[int] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : Optional[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Any = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(UpperCamelCase_ , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : int = jnp.log(jnp.clip(UpperCamelCase_ , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : Tuple = variance
UpperCAmelCase__ : Optional[int] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = True , ):
UpperCAmelCase__ : Tuple = timestep
if key is None:
UpperCAmelCase__ : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Any = jnp.split(UpperCamelCase_ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : List[Any] = None
# 1. compute alphas, betas
UpperCAmelCase__ : Dict = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : Tuple = 1 - alpha_prod_t
UpperCAmelCase__ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
' for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : str = jnp.clip(UpperCamelCase_ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : Tuple = jax.random.split(UpperCamelCase_ , num=1 )
UpperCAmelCase__ : Tuple = jax.random.normal(UpperCamelCase_ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(UpperCamelCase_ , UpperCamelCase_ , predicted_variance=UpperCamelCase_ ) ** 0.5) * noise
UpperCAmelCase__ : Dict = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase_ , state=UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
return add_noise_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
return get_velocity_common(state.common , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __len__( self ):
return self.config.num_train_timesteps
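
# Editor's note (a sketch, not part of the original file): this scheduler is
# stateless in the functional JAX style, so the state object is threaded through
# every call. `unet_apply`, `params`, `sample` and `key` are hypothetical.
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       model_output = unet_apply(params, sample, t)
#       sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)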
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class a ( lowercase ):
UpperCamelCase : List[Any] = """deit"""
def __init__( self , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3_072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.02 , UpperCamelCase_=1E-12 , UpperCamelCase_=224 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=16 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
UpperCAmelCase__ : int = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_act
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Optional[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Union[str, Any] = num_channels
UpperCAmelCase__ : Optional[Any] = qkv_bias
UpperCAmelCase__ : Dict = encoder_stride
class a ( lowercase ):
UpperCamelCase : Optional[int] = version.parse("""1.11""" )
@property
def __snake_case ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __snake_case ( self ):
return 1E-4
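
# Editor's note (a short sketch, not part of the original file): how the two
# classes above are typically combined when preparing an ONNX export.
#
#   config = DeiTConfig()                    # or DeiTConfig(image_size=384), etc.
#   onnx_config = DeiTOnnxConfig(config)
#   print(onnx_config.inputs)                # OrderedDict([('pixel_values', {0: 'batch', ...})])
#   print(onnx_config.atol_for_validation)   # 0.0001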
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
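
# Editor's note (a hedged sketch, not part of the original file): the checkpoint
# name is illustrative; any repo with a compatible unet/scheduler pair should work.
#
#   from PIL import Image
#
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   image = Image.open("face.png")
#   out_images, noised_at = pipe(image=image, strength=0.5, return_dict=False)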
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
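
# Editor's note (a small worked example, not part of the original module): in the
# lexicographic ordering of the permutations of [0, 1, 2, 3], index 5 is [0, 3, 2, 1].
#
#   >>> kth_permutation(5, 4)
#   [0, 3, 2, 1]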
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase_ : List[Any] = '''src/diffusers'''
lowerCAmelCase_ : List[str] = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase_ : str = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase_ : str = spec.loader.load_module()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return line.startswith(lowerCAmelCase ) or len(lowerCAmelCase ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , lowerCAmelCase ) is not None
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = object_name.split(""".""" )
UpperCAmelCase = 0
# First let's find the module where our object lives.
UpperCAmelCase = parts[i]
while i < len(lowerCAmelCase ) and not os.path.isfile(os.path.join(lowerCAmelCase , F'''{module}.py''' ) ):
i += 1
if i < len(lowerCAmelCase ):
UpperCAmelCase = os.path.join(lowerCAmelCase , parts[i] )
if i >= len(lowerCAmelCase ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(lowerCAmelCase , F'''{module}.py''' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase = f.readlines()
# Now let's find the class / func in the code!
UpperCAmelCase = """"""
UpperCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
UpperCAmelCase = line_index
while line_index < len(lowerCAmelCase ) and _should_continue(lines[line_index] , lowerCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase = lines[start_index:line_index]
return "".join(lowerCAmelCase )
lowerCAmelCase_ : List[Any] = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowerCAmelCase_ : Optional[Any] = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowerCAmelCase_ : int = re.compile(R'''<FILL\s+[^>]*>''')
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = code.split("""\n""" )
UpperCAmelCase = 0
while idx < len(lowerCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase ):
return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = len(get_indent(lowerCAmelCase ) ) > 0
if has_indent:
UpperCAmelCase = F'''class Bla:\n{code}'''
UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase )
UpperCAmelCase = black.format_str(lowerCAmelCase , mode=lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = style_docstrings_in_code(lowerCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=False ):
'''simple docstring'''
with open(lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = []
UpperCAmelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase ):
UpperCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = search.groups()
UpperCAmelCase = find_code_in_diffusers(lowerCAmelCase )
UpperCAmelCase = get_indent(lowerCAmelCase )
UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
UpperCAmelCase = theoretical_indent
UpperCAmelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
UpperCAmelCase = True
while line_index < len(lowerCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase ):
break
UpperCAmelCase = lines[line_index]
UpperCAmelCase = _should_continue(lowerCAmelCase , lowerCAmelCase ) and re.search(F'''^{indent}# End copy''' , lowerCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
UpperCAmelCase = lines[start_index:line_index]
UpperCAmelCase = """""".join(lowerCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
UpperCAmelCase = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(lowerCAmelCase ) is None]
UpperCAmelCase = """\n""".join(lowerCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase ) > 0:
UpperCAmelCase = replace_pattern.replace("""with""" , """""" ).split(""",""" )
UpperCAmelCase = [_re_replace_pattern.search(lowerCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = pattern.groups()
UpperCAmelCase = re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if option.strip() == "all-casing":
UpperCAmelCase = re.sub(obja.lower() , obja.lower() , lowerCAmelCase )
UpperCAmelCase = re.sub(obja.upper() , obja.upper() , lowerCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
UpperCAmelCase = start_index + 1
if overwrite and len(lowerCAmelCase ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCAmelCase )
return diffs
def _lowerCAmelCase ( lowerCAmelCase = False ):
'''simple docstring'''
UpperCAmelCase = glob.glob(os.path.join(lowerCAmelCase , """**/*.py""" ) , recursive=lowerCAmelCase )
UpperCAmelCase = []
for filename in all_files:
UpperCAmelCase = is_copy_consistent(lowerCAmelCase , lowerCAmelCase )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(lowerCAmelCase ) > 0:
UpperCAmelCase = """\n""".join(lowerCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase_ : Tuple = parser.parse_args()
check_copies(args.fix_and_overwrite)
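
# Editor's note (illustrative, not part of the original script): the marker this
# checker enforces is a comment of the following form, optionally followed by
# rename patterns after "with". The object path below is an example only.
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.step with DDPM->DDIM
#
# The script looks up the referenced source, applies the renames, runs black over
# the result, and compares it with the code that follows the marker.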
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase_ ( a_ , unittest.TestCase ):
_A : str = VideoToVideoSDPipeline
_A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
_A : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
_A : int = PipelineTesterMixin.required_optional_params - {'latents'}
_A : List[str] = False
# No `output_type`.
_A : Any = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
UpperCAmelCase = CLIPTextModel(snake_case__ )
UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase_ ( self , snake_case__ , snake_case__=0 ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith("""mps""" ):
UpperCAmelCase = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**snake_case__ )
UpperCAmelCase = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase = """np"""
UpperCAmelCase = sd_pipe(**snake_case__ ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=snake_case__ )
UpperCAmelCase = video.to("""cuda""" )
UpperCAmelCase = """Spiderman is surfing"""
UpperCAmelCase = pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type="""pt""" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : int = np.max(_outputs , axis=-1 , keepdims=_lowerCAmelCase )
lowercase__ : List[Any] = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCAmelCase )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = "sigmoid"
lowerCamelCase__ : int = "softmax"
lowerCamelCase__ : Union[str, Any] = "none"
@add_end_docstrings(
_a , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = False
lowerCamelCase__ : Optional[Any] = ClassificationFunction.NONE
def __init__( self , **a ) -> List[str]:
super().__init__(**a )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _UpperCAmelCase ( self , a=None , a=None , a="" , **a ) -> Optional[Any]:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
lowercase__ : Optional[int] = tokenizer_kwargs
lowercase__ : Any = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
lowercase__ : Union[str, Any] = self.model.config.return_all_scores
if isinstance(a , a ) or top_k is None:
lowercase__ : str = top_k
lowercase__ : str = False
elif return_all_scores is not None:
warnings.warn(
'`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , )
if return_all_scores:
lowercase__ : int = None
else:
lowercase__ : List[str] = 1
if isinstance(a , a ):
lowercase__ : List[Any] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowercase__ : Any = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *a , **a ) -> Dict:
lowercase__ : Tuple = super().__call__(*a , **a )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
lowercase__ : Optional[int] = 'top_k' not in kwargs
if isinstance(args[0] , a ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _UpperCAmelCase ( self , a , **a ) -> Dict[str, GenericTensor]:
lowercase__ : str = self.framework
if isinstance(a , a ):
return self.tokenizer(**a , return_tensors=a , **a )
elif isinstance(a , a ) and len(a ) == 1 and isinstance(inputs[0] , a ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a )
elif isinstance(a , a ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(a , return_tensors=a , **a )
def _UpperCAmelCase ( self , a ) -> List[Any]:
return self.model(**a )
def _UpperCAmelCase ( self , a , a=None , a=1 , a=True ) -> List[Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
lowercase__ : List[Any] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
lowercase__ : Optional[int] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
lowercase__ : Tuple = self.model.config.function_to_apply
else:
lowercase__ : List[str] = ClassificationFunction.NONE
lowercase__ : Any = model_outputs['logits'][0]
lowercase__ : List[str] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
lowercase__ : Tuple = sigmoid(a )
elif function_to_apply == ClassificationFunction.SOFTMAX:
lowercase__ : Optional[Any] = softmax(a )
elif function_to_apply == ClassificationFunction.NONE:
lowercase__ : Any = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
lowercase__ : Optional[int] = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a )
]
if not _legacy:
dict_scores.sort(key=lambda a : x["score"] , reverse=a )
if top_k is not None:
lowercase__ : List[Any] = dict_scores[:top_k]
return dict_scores
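
# Editor's note (a minimal usage sketch, not part of the original file): this
# pipeline is normally constructed through `transformers.pipeline`; the checkpoint
# name below is just a common example.
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   print(classifier("I love this movie!"))               # [{'label': 'POSITIVE', 'score': 0.99...}]
#   print(classifier("I love this movie!", top_k=None))   # scores for every label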
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
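# --- Editor's note: a minimal programmatic invocation of the converter above.
# The checkpoint and output paths are hypothetical placeholders, not values
# taken from the original script. ---
def _demo_convert():
    convert_unispeech_sat_checkpoint(
        "./unispeech_sat_base_plus.pt",  # fairseq checkpoint (placeholder path)
        "./unispeech-sat-hf",  # output folder for the converted HF model
        None,  # config_path: fall back to UniSpeechSatConfig defaults
        None,  # dict_path
        False,  # is_finetuned=False converts a pretraining checkpoint
    )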
| 645
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12_000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12_000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
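# --- Editor's sketch (not part of the original tests): the dispatch rule these
# tests pin down. `audio=` is routed to the feature extractor and `images=` to
# the image processor, and the two output dicts are merged. Default-constructed
# components are an assumption here; the tests above use pretrained ones. ---
def _demo_processor_dispatch():
    processor = TvltProcessor(image_processor=TvltImageProcessor(), feature_extractor=TvltFeatureExtractor())
    inputs = processor(audio=np.ones([12_000]), images=np.ones([3, 224, 224]))
    print(sorted(inputs.keys()))  # audio_mask, audio_values, pixel_mask, pixel_values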
| 554
|
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
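# --- Editor's sketch: a worked example for the greedy knapsack above. Sorting
# by `Things.get_value` picks highest-value items first; passing
# `Things.value_weight` instead would sort by value density. The menu data is
# illustrative. ---
def _demo_greedy():
    names = ["Burger", "Pizza", "Coca Cola", "Rice"]
    values = [80.0, 100.0, 60.0, 70.0]
    weights = [40.0, 60.0, 40.0, 70.0]
    menu = build_menu(names, values, weights)
    taken, total_value = greedy(menu, 100.0, Things.get_value)
    print(taken, total_value)  # Pizza + Burger fit the 100.0 weight budget -> 180.0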
| 554
| 1
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.228_4727, 0.310_4122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
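# --- Editor's sketch: the PT<->Flax round trip the mixin above exercises,
# shown once in isolation. The tiny checkpoints are the HF test fixtures
# already used in these tests. ---
def _demo_pt_flax_roundtrip():
    model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
        "hf-internal-testing/tiny-random-vit",
        "hf-internal-testing/tiny-bert",
        vision_from_pt=True,
        text_from_pt=True,
    )
    with tempfile.TemporaryDirectory() as tmp_dirname:
        model.save_pretrained(tmp_dirname)
        pt_model = VisionTextDualEncoderModel.from_pretrained(tmp_dirname, from_flax=True)
    print(type(pt_model).__name__)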
| 636
|
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
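# --- Editor's sketch: running both variants on a small triangle graph. Vertex
# ids are 0-based here while `connect` takes 1-based endpoints, matching the
# conventions of the code above. ---
def _demo_prim():
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 3)
    print(prim(graph, graph[0]))  # [(2, 1), (3, 2)]
    print(list(prim_heap(graph, graph[0])))  # same MST, computed with a heap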
| 636
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
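# --- Editor's sketch: the same NLI trick via the public zero-shot pipeline,
# whose default hypothesis template ("This example is {}.") mirrors the prompt
# built in `encode` above. ---
def _demo_zero_shot():
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier("I loved this film", candidate_labels=["positive", "negative"])
    print(result["labels"][0])  # labels come back sorted by score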
| 107
|
def counting_sort(collection):
    """Sort a collection of integers in ascending order with counting sort."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string using counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
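# --- Editor's note: counting sort runs in O(n + k) time and O(k) extra space,
# where k = max - min + 1, so it only pays off when the value range is small
# relative to the input length. A quick self-check: ---
def _demo_counting_sort():
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert counting_sort([-2, -5, -45]) == [-45, -5, -2]
    assert counting_sort_string("bca") == "abc"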
| 80
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 318
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
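# --- Editor's sketch: a quick sanity check on a converted model. The output
# directory name is a placeholder; use UniSpeechForPreTraining instead for
# checkpoints converted with --not_finetuned. ---
def _check_conversion(output_dir="./unispeech-hf"):
    model = UniSpeechForCTC.from_pretrained(output_dir)
    print(sum(p.numel() for p in model.parameters()), "parameters loaded")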
| 318
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
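# --- Editor's sketch (single-process): the invariant the block above verifies.
# `pad_across_processes` grows dim 0 of every rank's tensor to the largest size
# across ranks, filling with zeros at the end (default) or at the front
# (pad_first=True). Reproduced locally with hypothetical per-rank sizes: ---
def _demo_padding():
    num_processes = 2
    tensors = [torch.ones(rank + 2, 10) for rank in range(num_processes)]
    max_rows = max(t.shape[0] for t in tensors)  # == num_processes + 1
    padded = [torch.cat([t, torch.zeros(max_rows - t.shape[0], 10)]) for t in tensors]
    assert all(t.shape[0] == num_processes + 1 for t in padded)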
| 78
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 655
| 0
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """A wrapper around a learning rate scheduler that only steps it once the wrapped optimizer(s) actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
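# --- Editor's sketch: how this wrapper is normally obtained. `accelerator.prepare`
# returns the scheduler already wrapped; the tiny model/optimizer below are
# illustrative, not part of this module. ---
def _demo_accelerated_scheduler():
    import torch
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    # stepping is now a no-op until the wrapped optimizer has really stepped
    scheduler.step()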
| 717
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
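# --- Editor's sketch: typical use of the processor above. The checkpoint name
# is real but illustrative; the default OCR path (apply_ocr=True) additionally
# needs Tesseract installed, and the image path is a placeholder. ---
def _demo_layoutxlm_processor():
    from PIL import Image

    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(encoding.keys())  # input_ids, bbox, attention_mask, image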
| 688
| 0
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
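

# Minimal usage sketch (added for illustration; not in the original script):
# each (src, dest) pair produced by `create_rename_keys` is applied like this.
#
#     state_dict = {"backbone.patch_embed.proj.weight": 0}
#     rename_key(state_dict, "backbone.patch_embed.proj.weight", "projection.weight")
#     assert state_dict == {"projection.weight": 0}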
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
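

# Shape sketch (illustrative, not part of the original script): each fused qkv
# projection above has shape (3*dim, dim) and is split into equal thirds, in
# query/key/value order:
#
#     dim = 96
#     qkv_weight = torch.randn(3 * dim, dim)
#     q, k, v = qkv_weight[:dim], qkv_weight[dim : dim * 2], qkv_weight[-dim:]
#     assert q.shape == k.shape == v.shape == (dim, dim)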
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
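
# Example invocation (illustrative; the script name and paths are placeholders):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/model.pkl \
#         --pytorch_dump_folder_path /path/to/output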
| 420
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
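
# Illustrative note (not part of the original module): `_LazyModule` defers the
# framework-specific imports declared in `_import_structure` until an attribute
# is first accessed, so e.g. importing `MBartConfig` stays cheap and the torch
# modeling file is only imported once `MBartModel` is actually touched.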
| 420
| 1
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
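
    # Sketch of the subtlety noted above (illustrative): post-processing scales
    # latents roughly as `latents * clip_std + clip_mean`; with the default
    # zeros for clip_std every output would collapse to clip_mean, so the test
    # forces clip_std to ones to keep the transform non-degenerate.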
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 70
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
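

def _nchw_to_nhwc_demo():
    # Illustrative helper (not part of the original file): shows the layout
    # change performed in `TFRegNetEmbeddings.call` above, since Keras
    # convolutions on CPU expect channels-last input.
    x = tf.random.uniform((2, 3, 224, 224))  # (batch, channels, height, width)
    y = tf.transpose(x, perm=(0, 2, 3, 1))  # -> shape (2, 224, 224, 3), i.e. NHWC
    return y.shape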
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
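

def _squeeze_excite_demo():
    # Illustrative sketch (not part of the original file): the SE layer above
    # pools features to (batch, 1, 1, channels), squeezes/expands them through
    # two 1x1 convolutions, and rescales the input channel-wise via a sigmoid
    # gate. A minimal version of that gating:
    x = tf.random.uniform((2, 7, 7, 8))
    pooled = tf.reduce_mean(x, axis=(1, 2), keepdims=True)  # (2, 1, 1, 8)
    gate = tf.sigmoid(pooled)
    return (x * gate).shape  # broadcasting rescales every channel -> (2, 7, 7, 8)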
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 70
| 1
|
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
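
    # Deletion sketch (illustrative): deleting "band" from a trie that also
    # stores "bandana" only clears the leaf flag at the final "d"; no nodes are
    # removed because "bandana" still runs through them. Deleting a word whose
    # trailing nodes have no other children prunes those nodes bottom-up.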
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 107
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 107
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : List[str] = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 595
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """
    Formats a user-agent string with basic info about a request.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision, )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """
    Extracts the commit hash from a resolved filename toward a cache file.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
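

def _extract_commit_hash_demo():
    # Illustrative sketch (not part of the original file): the `snapshots/<hash>/`
    # path segment is what `extract_commit_hash` captures and validates.
    path = "models--foo--bar/snapshots/0123456789abcdef0123456789abcdef01234567/config.json"
    return extract_commit_hash(path)  # -> the 40-character hash above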
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
__A : str = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
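

# Example (derived directly from the code above): the variant is inserted just
# before the file extension.
#
#     _add_variant("diffusion_pytorch_model.bin", "fp16")
#     # -> "diffusion_pytorch_model.fp16.bin"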
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`.")
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.")
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
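# A condensed sketch of the resolution order implemented above (the helper name
# `resolve_weights` is hypothetical; the Hub download branch and most error
# handling are elided): a direct file path wins, then a file inside a local
# directory, optionally under a subfolder, and only then a Hub download.
import os
from typing import Optional
def resolve_weights(name_or_path: str, weights_name: str, subfolder: Optional[str] = None) -> str:
    if os.path.isfile(name_or_path):
        return name_or_path
    if os.path.isdir(name_or_path):
        parts = [name_or_path] + ([subfolder] if subfolder else []) + [weights_name]
        candidate = os.path.join(*parts)
        if os.path.isfile(candidate):
            return candidate
        raise EnvironmentError(f"No file named {weights_name} found in {name_or_path}.")
    raise EnvironmentError("Not a local path; fall back to hf_hub_download(name_or_path, filename=weights_name, ...).")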
| 595
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPanoramaPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase_ = DDIMScheduler()
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=0 ):
'''simple docstring'''
lowercase_ = torch.manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowercase_ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = sd_pipe(**UpperCamelCase__ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowercase_ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = """french fries"""
lowercase_ = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowercase_ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = sd_pipe(**UpperCamelCase__ , view_batch_size=2 )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase_ = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowercase_ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = sd_pipe(**UpperCamelCase__ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=UpperCamelCase__ )
lowercase_ = StableDiffusionPanoramaPipeline(**UpperCamelCase__ )
lowercase_ = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = sd_pipe(**UpperCamelCase__ ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : int=0 ):
'''simple docstring'''
lowercase_ = torch.manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = """stabilityai/stable-diffusion-2-base"""
lowercase_ = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowercase_ = self.get_inputs()
lowercase_ = pipe(**UpperCamelCase__ ).images
lowercase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowercase_ = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=UpperCamelCase__ )
lowercase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowercase_ = self.get_inputs()
lowercase_ = pipe(**UpperCamelCase__ ).images
lowercase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
        lowercase_ = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        number_of_steps = 0
def callback_fn(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase_ = latents[0, -3:, -3:, -1]
lowercase_ = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase_ = latents[0, -3:, -3:, -1]
lowercase_ = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
lowercase_ = """stabilityai/stable-diffusion-2-base"""
lowercase_ = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
lowercase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
lowercase_ = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = """stabilityai/stable-diffusion-2-base"""
lowercase_ = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder="""scheduler""" )
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
lowercase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase_ = self.get_inputs()
lowercase_ = pipe(**UpperCamelCase__ )
lowercase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 412
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return np.dot(UpperCAmelCase__ , UpperCAmelCase__ )
class UpperCamelCase__ :
def __init__( self : Any , *,
UpperCamelCase__ : float = np.inf , UpperCamelCase__ : str = "linear" , UpperCamelCase__ : float = 0.0 , ):
'''simple docstring'''
lowercase_ = regularization
lowercase_ = gamma
if kernel == "linear":
lowercase_ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
lowercase_ = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: default gamma = 1 / (n_features * X.var())  (see wiki)
            # previously it was 1 / n_features
else:
lowercase_ = F'''Unknown kernel: {kernel}'''
raise ValueError(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ndarray , UpperCamelCase__ : ndarray ):
'''simple docstring'''
return np.dot(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : ndarray , UpperCamelCase__ : ndarray ):
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : list[ndarray] , UpperCamelCase__ : ndarray ):
'''simple docstring'''
lowercase_ = observations
lowercase_ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
        # With l a vector of Lagrange multipliers
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: 0 <= ln <= C (here C = self.regularization)
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowercase_) , ) = np.shape(UpperCamelCase__ )
def to_minimize(UpperCamelCase__ : ndarray ) -> float:
lowercase_ = 0
((lowercase_) , ) = np.shape(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(UpperCamelCase__ )
lowercase_ = LinearConstraint(UpperCamelCase__ , 0 , 0 )
lowercase_ = Bounds(0 , self.regularization )
lowercase_ = minimize(
UpperCamelCase__ , np.ones(UpperCamelCase__ ) , bounds=UpperCamelCase__ , constraints=[ly_contraint] ).x
lowercase_ = l_star
        # calculating the mean offset of the separating plane from the training points
lowercase_ = 0
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
lowercase_ = s / n
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : ndarray ):
'''simple docstring'''
lowercase_ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
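# A self-contained sketch of the Wolfe-dual fit described in the comments above,
# run on a toy 1-D dataset (the names here are illustrative; this mirrors,
# rather than calls, the class above, and reuses the module's imports).
xs = np.array([[-2.0], [-1.0], [1.0], [2.0]])
ys = np.array([-1.0, -1.0, 1.0, 1.0])
def dual_objective(l: ndarray) -> float:
    s = sum(
        l[i] * l[j] * ys[i] * ys[j] * np.dot(xs[i], xs[j])
        for i in range(len(ys))
        for j in range(len(ys))
    )
    return 0.5 * s - l.sum()
l_star = minimize(
    dual_objective,
    np.ones(len(ys)),
    bounds=Bounds(0, 1e6),  # 0 <= l_n <= C
    constraints=[LinearConstraint(ys, 0, 0)],  # sum_n l_n * y_n = 0
).x
w = sum(l_star[i] * ys[i] * xs[i] for i in range(len(ys)))
print(w)  # hard-margin solution: approximately [1.0]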
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412
| 1
|
'''simple docstring'''
from manim import *
class lowerCAmelCase ( a ):
def lowercase ( self ):
lowerCAmelCase : int = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCAmelCase : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : str = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Any = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : str = VGroup(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Dict = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case__ )
lowerCAmelCase : str = [mem.copy() for i in range(1 )]
lowerCAmelCase : Any = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Optional[int] = Text('GPU' , font_size=24 )
lowerCAmelCase : List[Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
gpu.align_to(snake_case__ , snake_case__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case__ )
lowerCAmelCase : Dict = [mem.copy() for i in range(6 )]
lowerCAmelCase : Dict = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
lowerCAmelCase : Tuple = Text('Model' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , )
lowerCAmelCase : Tuple = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : List[Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case__ , run_time=2.5 ) , Write(snake_case__ ) , Write(snake_case__ ) )
self.add(snake_case__ )
lowerCAmelCase : List[str] = []
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : List[Any] = []
for i, rect in enumerate(snake_case__ ):
lowerCAmelCase : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(snake_case__ , opacity=0.7 )
cpu_target.move_to(snake_case__ )
cpu_target.generate_target()
lowerCAmelCase : str = 0.4_6 / 4
lowerCAmelCase : Optional[Any] = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=snake_case__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case__ , buff=0.0 )
cpu_targs.append(snake_case__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case__ ) )
second_animations.append(MoveToTarget(snake_case__ , run_time=1.5 ) )
self.play(*snake_case__ )
self.play(*snake_case__ )
self.wait()
| 646
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def __UpperCamelCase ( _A : str , _A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
lowerCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(_A )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def __UpperCamelCase ( _A : str , _A : DatasetInfo ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase : str = str(_A )
dataset_info.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfo.from_directory(_A )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_A , 'dataset_info.json' ) )
def __UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase : Tuple = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
lowerCAmelCase : Optional[int] = dataset_info._to_yaml_dict()
assert sorted(_A ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowerCAmelCase : Any = yaml.safe_dump(_A )
lowerCAmelCase : int = yaml.safe_load(_A )
assert dataset_info_yaml_dict == reloaded
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = DatasetInfo()
lowerCAmelCase : List[Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __UpperCamelCase ( _A : Tuple , _A : DatasetInfosDict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase : Tuple = str(_A )
dataset_infos_dict.write_to_directory(_A )
lowerCAmelCase : List[str] = DatasetInfosDict.from_directory(_A )
    # the config_name of the dataset_infos_dict takes precedence over the DatasetInfo attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCAmelCase : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCAmelCase : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_A , 'README.md' ) )
| 646
| 1
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase__ ( _A ):
return (data["data"], data["target"])
def lowerCamelCase__ ( _A , _A ):
a : int = XGBClassifier()
classifier.fit(_A , _A )
return classifier
def lowerCamelCase__ ( ):
a : Optional[int] = load_iris()
a , a : Any = data_handling(_A )
a , a , a , a : Union[str, Any] = train_test_split(
_A , _A , test_size=0.25 )
a : List[str] = iris['target_names']
# Create an XGBoost Classifier from the training data
a : Optional[Any] = xgboost(_A , _A )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_A , _A , _A , display_labels=_A , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
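# A quick illustration of the Bunch layout assumed by the data handling above:
# load_iris() exposes "data" (150 x 4 feature rows) and "target" (150 labels).
_bunch = load_iris()
assert _bunch["data"].shape == (150, 4) and _bunch["target"].shape == (150,)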
| 526
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase: List[Any] = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Dict = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Tuple = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: List[Any] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase: str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
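# A minimal sketch of the same deferred-import idea using PEP 562 module-level
# __getattr__ (illustrative only; transformers' _LazyModule implements this more
# generally, driven by an import-structure dict like the one above): attribute
# access triggers the real import, so importing the package stays cheap.
import importlib
_LAZY_ATTRS = {"ReformerConfig": ".configuration_reformer"}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")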
| 526
| 1
|
_UpperCAmelCase : int = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
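# A small sketch of how a pin table like this is typically consumed (the helper
# name `deps_list` is assumed; in transformers the analogous machinery lives in
# setup.py and dependency_versions_check.py): select version specifiers by name.
def deps_list(deps: dict, *pkgs: str) -> list:
    return [deps[p] for p in pkgs]
assert deps_list(_UpperCAmelCase, "numpy", "tqdm") == ["numpy>=1.17", "tqdm>=4.27"]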
| 705
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 3
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _snake_case :
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : int=13 ,SCREAMING_SNAKE_CASE__ : Optional[int]=7 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : str=99 ,SCREAMING_SNAKE_CASE__ : Optional[int]=64 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=5 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : List[str]=37 ,SCREAMING_SNAKE_CASE__ : int="gelu" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Dict=512 ,SCREAMING_SNAKE_CASE__ : Optional[int]=16 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=3 ,SCREAMING_SNAKE_CASE__ : str=4 ,SCREAMING_SNAKE_CASE__ : Tuple=None ,):
SCREAMING_SNAKE_CASE:Optional[int] = parent
SCREAMING_SNAKE_CASE:List[Any] = batch_size
SCREAMING_SNAKE_CASE:Optional[int] = seq_length
SCREAMING_SNAKE_CASE:List[str] = is_training
SCREAMING_SNAKE_CASE:Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE:Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE:Dict = use_labels
SCREAMING_SNAKE_CASE:int = vocab_size
SCREAMING_SNAKE_CASE:Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE:Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE:int = num_attention_heads
SCREAMING_SNAKE_CASE:Tuple = intermediate_size
SCREAMING_SNAKE_CASE:str = hidden_act
SCREAMING_SNAKE_CASE:Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:Any = max_position_embeddings
SCREAMING_SNAKE_CASE:List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE:List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE:str = initializer_range
SCREAMING_SNAKE_CASE:List[str] = num_labels
SCREAMING_SNAKE_CASE:Optional[int] = num_choices
SCREAMING_SNAKE_CASE:Dict = scope
SCREAMING_SNAKE_CASE:Tuple = vocab_size - 1
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE:Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE:Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE:int = None
if self.use_labels:
SCREAMING_SNAKE_CASE:Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE:List[str] = self.get_config()
return config, input_ids, input_mask, token_labels
def __UpperCamelCase ( self : Tuple ):
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE:List[str] = True
return config, input_ids, input_mask, token_labels
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = GPTNeoXModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = True
SCREAMING_SNAKE_CASE:Dict = GPTNeoXModel(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:Any = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:List[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
SCREAMING_SNAKE_CASE:List[Any] = self.num_labels
SCREAMING_SNAKE_CASE:str = GPTNeoXForQuestionAnswering(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:List[str] = self.num_labels
SCREAMING_SNAKE_CASE:Dict = GPTNeoXForSequenceClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE:Tuple = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:List[str] = self.num_labels
SCREAMING_SNAKE_CASE:Optional[Any] = GPTNeoXForTokenClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ):
SCREAMING_SNAKE_CASE:Optional[Any] = True
SCREAMING_SNAKE_CASE:Optional[Any] = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE:List[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,use_cache=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids accordingly
SCREAMING_SNAKE_CASE:Optional[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
SCREAMING_SNAKE_CASE:Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append the next tokens to input_ids and extend the attention mask
SCREAMING_SNAKE_CASE:List[Any] = torch.cat([input_ids, next_tokens] ,dim=-1 )
SCREAMING_SNAKE_CASE:str = torch.cat([input_mask, next_mask] ,dim=-1 )
SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = output_from_no_past["hidden_states"][0]
SCREAMING_SNAKE_CASE:Tuple = model(
SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,past_key_values=SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,)["hidden_states"][0]
# select random slice
SCREAMING_SNAKE_CASE:Optional[int] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE:Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE:List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-3 ) )
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = config_and_inputs
SCREAMING_SNAKE_CASE:Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( _a , _a , _a , unittest.TestCase ):
_A : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_A : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_A : Tuple = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = False
_A : List[Any] = False
_A : int = False
_A : Optional[Any] = False
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = GPTNeoXModelTester(self )
SCREAMING_SNAKE_CASE:int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=64 ,num_attention_heads=8 )
def __UpperCamelCase ( self : Any ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Any ):
# This regression test was failing with PyTorch < 1.3
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE:Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : str ):
SCREAMING_SNAKE_CASE:List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def __UpperCamelCase ( self : List[Any] ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE:Any = ids_tensor([1, 10] ,config.vocab_size )
SCREAMING_SNAKE_CASE:Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE:Optional[Any] = GPTNeoXModel(SCREAMING_SNAKE_CASE__ )
original_model.to(SCREAMING_SNAKE_CASE__ )
original_model.eval()
SCREAMING_SNAKE_CASE:Optional[int] = original_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
SCREAMING_SNAKE_CASE:List[Any] = original_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE:str = {"type": scaling_type, "factor": 10.0}
SCREAMING_SNAKE_CASE:str = GPTNeoXModel(SCREAMING_SNAKE_CASE__ )
scaled_model.to(SCREAMING_SNAKE_CASE__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE:str = scaled_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
SCREAMING_SNAKE_CASE:Tuple = scaled_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1e-5 ) )
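# A short sketch of what "linear" RoPE scaling means in the test above (assumed
# convention: positions are divided by the factor before computing the rotary
# angles), which is why the scaled model's outputs differ from the unscaled
# model's. Requires torch, like the tests above.
def _rope_angles(positions, dim=8, base=10000.0, factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)
assert not torch.allclose(_rope_angles(torch.arange(10)), _rope_angles(torch.arange(10), factor=10.0))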
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Union[str, Any] = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
SCREAMING_SNAKE_CASE:Tuple = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = tokenizer("My favorite food is" ,return_tensors="pt" ).to(SCREAMING_SNAKE_CASE__ )
            # The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
SCREAMING_SNAKE_CASE:Optional[int] = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
SCREAMING_SNAKE_CASE:List[Any] = model.generate(**SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,max_new_tokens=20 )
SCREAMING_SNAKE_CASE:int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )[0]
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
| 143
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
# Load configuration defined in the metadata file
with open(snake_case ) as metadata_file:
SCREAMING_SNAKE_CASE:str = json.load(snake_case )
SCREAMING_SNAKE_CASE:List[str] = LukeConfig(use_entity_aware_attention=snake_case , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE:Tuple = torch.load(snake_case , map_location="cpu" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE:Dict = load_entity_vocab(snake_case )
SCREAMING_SNAKE_CASE:Optional[int] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE:Dict = AddedToken("<ent>" , lstrip=snake_case , rstrip=snake_case )
SCREAMING_SNAKE_CASE:List[Any] = AddedToken("<ent2>" , lstrip=snake_case , rstrip=snake_case )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(snake_case )
with open(os.path.join(snake_case , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(snake_case , snake_case )
SCREAMING_SNAKE_CASE:str = LukeTokenizer.from_pretrained(snake_case )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE:Optional[Any] = state_dict["embeddings.word_embeddings.weight"]
SCREAMING_SNAKE_CASE:Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE:Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE:Tuple = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE:Union[str, Any] = F'''encoder.layer.{layer_index}.attention.self.'''
SCREAMING_SNAKE_CASE:List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE:List[Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE:List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE:str = state_dict["entity_embeddings.entity_embeddings.weight"]
SCREAMING_SNAKE_CASE:int = entity_emb[entity_vocab["[MASK]"]]
SCREAMING_SNAKE_CASE:str = LukeModel(config=snake_case ).eval()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = model.load_state_dict(snake_case , strict=snake_case )
if not (len(snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(snake_case )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
SCREAMING_SNAKE_CASE:Optional[Any] = LukeTokenizer.from_pretrained(snake_case , task="entity_classification" )
SCREAMING_SNAKE_CASE:Tuple = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
SCREAMING_SNAKE_CASE:List[str] = (39, 42)
SCREAMING_SNAKE_CASE:int = tokenizer(snake_case , entity_spans=[span] , add_prefix_space=snake_case , return_tensors="pt" )
SCREAMING_SNAKE_CASE:Any = model(**snake_case )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE:List[str] = torch.Size((1, 42, 1024) )
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
SCREAMING_SNAKE_CASE:List[str] = torch.Size((1, 42, 768) )
SCREAMING_SNAKE_CASE:int = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.Size((1, 1, 1024) )
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
SCREAMING_SNAKE_CASE:List[str] = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE:Any = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(snake_case ) )
model.save_pretrained(snake_case )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:Any = {}
with open(snake_case , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = line.rstrip().split("\t" )
SCREAMING_SNAKE_CASE:str = index
return entity_vocab
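# A self-contained sketch of the TSV format parsed above: one entity per line,
# tab-separated from its count, with the line index becoming the entity id.
import io
def parse_entity_vocab(lines) -> dict:
    vocab = {}
    for index, line in enumerate(lines):
        title, _count = line.rstrip().split("\t")
        vocab[title] = index
    return vocab
assert parse_entity_vocab(io.StringIO("[PAD]\t0\n[MASK]\t1000\n")) == {"[PAD]": 0, "[MASK]": 1}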
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
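# --- Usage sketch (added; not part of the original script) ---
# Example invocation, assuming this file is saved as
# convert_luke_checkpoint.py and that the local paths (hypothetical) exist;
# the flag names match the argparse definitions above.
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path ./luke_base/pytorch_model.bin \
#       --metadata_path ./luke_base/metadata.json \
#       --entity_vocab_path ./luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base-converted \
#       --model_size base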
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size=65536,
        sample_rate=None,
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0],
            )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )
    def forward(self, sample, timestep, return_dict=True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
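# --- Illustration (added; not part of the original module) ---
# A minimal torch-only sketch of the timestep handling in forward() above:
# a (batch, embed_dim) embedding is broadcast across the sample's length
# dimension when no timestep MLP is configured. Shapes here are illustrative.
import torch


def _broadcast_timestep_embed(timestep_embed, sample):
    # (batch, embed_dim) -> (batch, embed_dim, length), matching sample's dtype
    timestep_embed = timestep_embed[..., None]
    timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
    return timestep_embed.broadcast_to(sample.shape[:1] + timestep_embed.shape[1:])


_demo_sample = torch.randn(2, 14, 32)  # (batch, channels, length)
_demo_embed = torch.randn(2, 8)  # (batch, embed_dim)
assert _broadcast_timestep_embed(_demo_embed, _demo_sample).shape == (2, 8, 32)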
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
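# --- Illustration (added; not part of the original test file) ---
# Numpy-only sketch of the default mask construction above: positions equal to
# pad_token_id become 0, everything else 1. The pad id of 1 is hypothetical
# but matches the configs used by the testers below.
_demo_pad_token_id = 1
_demo_input_ids = np.array([[5, 7, 2, _demo_pad_token_id, _demo_pad_token_id]])
_demo_mask = np.where(_demo_input_ids != _demo_pad_token_id, 1, 0)
assert _demo_mask.tolist() == [[1, 1, 1, 0, 0]]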
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=F"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=F"""Max diff is {diff}""")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
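# --- Illustration (added; not part of the original script) ---
# A few hypothetical timm-style keys traced through rename_key above, showing
# the stem/blocks/head remapping into the HF BiT layout.
assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("head.fc.weight") == "classifier.1.weight"
assert rename_key("stages.0.blocks.0.conv1.weight") == "bit.encoder.stages.0.layers.0.conv1.weight"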
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
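# --- Usage sketch (added; not part of the original script) ---
# Example invocation, assuming this file is saved as convert_bit_to_pytorch.py
# and that the output directory (hypothetical) is writable:
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub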
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
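# --- Illustration (added; not part of the original module) ---
# A dependency-free sketch of the skip-list matching rule used above: a module
# is left unconverted when a skip entry equals its dotted name or is a proper
# dotted prefix of it. The module names below are hypothetical.
def _should_convert(current_key_name_str, modules_to_not_convert):
    for key in modules_to_not_convert:
        if ((key in current_key_name_str) and (key + "." in current_key_name_str)) or key == current_key_name_str:
            return False
    return True


assert _should_convert("transformer.h.0.mlp", ["lm_head"]) is True
assert _should_convert("lm_head", ["lm_head"]) is False
assert _should_convert("lm_head.dense", ["lm_head"]) is False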
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """
    >>> check(['001', '101'])
    ['_01']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """
    >>> decimal_to_binary(3, [1, 5])
    ['001', '101']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    >>> is_for_table('__1', '011', 2)
    True
    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # first pick every essential prime implicant (sole cover of some column)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily cover the remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
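# --- Worked example (added; not part of the original module) ---
# Quine-McCluskey on f(a, b, c) with minterms {1, 5}: 001 and 101 merge into
# the single prime implicant "_01" (the first variable is a don't-care), and
# it is essential since it is the only implicant covering each minterm.
assert decimal_to_binary(3, [1, 5]) == ["001", "101"]
assert check(["001", "101"]) == ["_01"]
assert selection(prime_implicant_chart(["_01"], ["001", "101"]), ["_01"]) == ["_01"]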
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
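    # --- Illustration (added; not part of the original test file) ---
    # The fixed latents are reproducible because the generator is pinned to a
    # device and seeded; an equivalent plain call on CPU would be:
    #   randn_tensor((1, 3, 64, 64), generator=torch.Generator("cpu").manual_seed(0))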
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (1-indexed; F(1) = 0, F(2) = 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with n digits."""
    return fibonacci_digits_index(n)
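# --- Illustration (added; not part of the original solution) ---
# fibonacci is 1-indexed here (F(1) = 0, F(2) = 1), and fibonacci_digits_index
# returns the first index whose Fibonacci number has at least n digits.
assert fibonacci(7) == 13
assert fibonacci_digits_index(2) == 7  # F(7) = 13 is the first two-digit term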
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
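    # --- Illustration (added; not part of the original test file) ---
    # ids_tensor([batch, seq], vocab_size) draws random token ids in
    # [0, vocab_size); a plain-torch equivalent of the call above would be:
    #   torch.randint(0, self.vocab_size, (self.batch_size, self.seq_length))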
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = MegatronBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MegatronBertForCausalLM(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = MegatronBertForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Dict = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = MegatronBertForPreTraining(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : str = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : str = MegatronBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Union[str, Any] = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = self.num_labels
UpperCamelCase : Optional[int] = MegatronBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.num_labels
UpperCamelCase : List[str] = MegatronBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : List[str] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = self.num_choices
UpperCamelCase : int = MegatronBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
UpperCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC floats in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
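# Illustrative usage sketch (not part of the original module); the array shape
# and value range below are assumptions chosen for the demo.
if __name__ == "__main__":
    import numpy as np

    demo_batch = np.random.rand(2, 64, 64, 3)  # NHWC floats in [0, 1]
    demo_pil_images = numpy_to_pil(demo_batch)
    print([im.size for im in demo_pil_images])  # -> [(64, 64), (64, 64)]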
'''simple docstring'''
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """For each element, scan the rest of the list for the next greater value."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack solution: each element is pushed and popped at most once."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
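# A hedged worked example of the stack variant (inputs chosen for illustration):
# scanning right-to-left keeps a decreasing chain of candidates on the stack, so
# each element is pushed and popped at most once, giving O(n) overall.
if __name__ == "__main__":
    assert next_greatest_element([2, 7, 3, 5, 1]) == [7, -1, 5, -1, -1]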
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num!; lru_cache memoizes results across calls."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
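# Minimal usage sketch (illustrative): @lru_cache memoizes, so repeated calls
# after the first are answered from the cache.
if __name__ == "__main__":
    print(factorial(10))           # 3628800
    print(factorial.cache_info())  # hit count grows on repeated calls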
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
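# Quick usage sketch (illustrative): "111100" is grouped as 111|100 -> "74".
if __name__ == "__main__":
    print(bin_to_octal("111100"))  # 74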
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    """All inputs must be positive for Graham's law to apply."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
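# Hedged usage sketch (argument order as reconstructed above): hydrogen
# (2.016 g/mol) effuses roughly four times faster than oxygen (31.998 g/mol).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.998))  # ~3.984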
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
        bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token,
            eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase : Any = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Project Euler 1: sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every permutation of three elements."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer approach on the sorted array, O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
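# Illustrative correctness check for the two-pointer version (sample values assumed):
if __name__ == "__main__":
    assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)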
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND returns 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc between x_start and x_end using trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
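# Sanity-check sketch (illustrative): the integral of x^2 over [0, 1] is 1/3,
# and 1000 trapezoids approximate it to well within 1e-4.
if __name__ == "__main__":
    approx = trapezoidal_area(lambda x: x * x, 0, 1, 1000)
    assert abs(approx - 1 / 3) < 1e-4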
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """The key is stored privately and used as a default by all methods."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """DFS that walks unused edges, building the Euler path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Count odd-degree nodes: 0 -> Euler cycle, 2 -> Euler path, otherwise neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR returns 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 |      {nor_gate(0, 0)} |")
    print(f"|       0 |       1 |      {nor_gate(0, 1)} |")
    print(f"|       1 |       0 |      {nor_gate(1, 0)} |")
    print(f"|       1 |       1 |      {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>",
        pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT = (0, 0)
        min_weight = 0

        while len(subgraph.vertices) < len(self.vertices):
            # pick the lightest edge crossing the cut between subgraph and the rest
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving achieved by keeping only the minimal network."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f'{solution() = }')
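# Minimal illustration with assumed data: the MST of a weighted triangle keeps
# the two lightest edges, so the saving over the full graph is the heaviest edge.
if __name__ == "__main__":
    tri = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(tri.prims_algorithm().edges.values()) == 3  # keeps (0,1) and (1,2)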
'''simple docstring'''
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609_344,
"knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.277_777_778,
"mph": 0.621_371_192,
"knot": 0.539_956_803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert speed between km/h, m/s, mph and knot."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 434
| 0
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property ):
    def __get__( self , obj , objtype=None ):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool( val ) -> int:
    '''Convert a string representation of truth to 1 (true) or 0 (false).'''
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )


def is_tensor( x ) -> bool:
    '''Return True if `x` is a torch/TF/JAX tensor or a numpy array.'''
    if is_torch_fx_proxy(x ):
        return True
    if is_torch_available():
        import torch

        if isinstance(x , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(x , np.ndarray )


def _is_numpy( x ) -> bool:
    return isinstance(x , np.ndarray )


def is_numpy_array( x ) -> bool:
    return _is_numpy(x )


def _is_torch( x ) -> bool:
    import torch

    return isinstance(x , torch.Tensor )


def is_torch_tensor( x ) -> bool:
    return False if not is_torch_available() else _is_torch(x )


def _is_torch_device( x ) -> bool:
    import torch

    return isinstance(x , torch.device )


def is_torch_device( x ) -> bool:
    return False if not is_torch_available() else _is_torch_device(x )


def _is_torch_dtype( x ) -> bool:
    import torch

    if isinstance(x , str ):
        if hasattr(torch , x ):
            x = getattr(torch , x )
        else:
            return False
    return isinstance(x , torch.dtype )


def is_torch_dtype( x ) -> bool:
    return False if not is_torch_available() else _is_torch_dtype(x )


def _is_tensorflow( x ) -> bool:
    import tensorflow as tf

    return isinstance(x , tf.Tensor )


def is_tf_tensor( x ) -> bool:
    return False if not is_tf_available() else _is_tensorflow(x )


def _is_tf_symbolic_tensor( x ) -> bool:
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(x )
    return type(x ) == tf.Tensor


def is_tf_symbolic_tensor( x ) -> bool:
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x )


def _is_jax( x ) -> bool:
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x , jnp.ndarray )


def is_jax_tensor( x ) -> bool:
    return False if not is_flax_available() else _is_jax(x )


def to_py_obj( obj ):
    '''Convert tensors/arrays (possibly nested in dicts/lists) to plain Python objects.'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy( obj ):
    '''Convert tensors (possibly nested in dicts/lists) to numpy arrays.'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput(OrderedDict ):
    def __post_init__( self ):
        """simple docstring"""
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )

        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )

        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )

    def setdefault( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )

    def pop( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )

    def update( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )

    def __getitem__( self , k ):
        """simple docstring"""
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__( self , name , value ):
        """simple docstring"""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )

    def __setitem__( self , key , value ):
        """simple docstring"""
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )

    def to_tuple( self ):
        """simple docstring"""
        return tuple(self[k] for k in self.keys() )
class ExplicitEnum(str , Enum ):
    @classmethod
    def _missing_( cls , value ):
        """simple docstring"""
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class PaddingStrategy(ExplicitEnum ):
    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class TensorType(ExplicitEnum ):
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__( self , context_managers : List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
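# A minimal sketch of ContextManagers above with hypothetical context managers:
#   import tempfile
#   with ContextManagers([tempfile.TemporaryDirectory() , tempfile.TemporaryDirectory()] ):
#       ...  # both temporary directories exist here; ExitStack unwinds them in reverse order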
def can_return_loss( model_class ) -> bool:
    '''Check whether a model class can return loss (i.e. accepts a `return_loss` argument).'''
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels( model_class ):
    '''Find the labels used by a given model class.'''
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    '''Flatten a nested dict into a single level, joining keys with `delimiter`.'''

    def _flatten_dict( d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v

    return dict(_flatten_dict(d , parent_key , delimiter ) )
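# Example for flatten_dict above:
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}} )
#   -> {"a": 1, "b.c": 2, "b.d.e": 3}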
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir : bool = False ):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ):
    '''Framework-agnostic transpose for numpy, torch, TF and JAX tensors.'''
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array )}.""" )


def reshape( array , newshape ):
    '''Framework-agnostic reshape for numpy, torch, TF and JAX tensors.'''
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array )}.""" )


def squeeze( array , axis=None ):
    '''Framework-agnostic squeeze for numpy, torch, TF and JAX tensors.'''
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array )}.""" )


def expand_dims( array , axis ):
    '''Framework-agnostic expand_dims for numpy, torch, TF and JAX tensors.'''
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array )}.""" )


def tensor_size( array ):
    '''Framework-agnostic element count for numpy, torch, TF and JAX tensors.'''
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for tensor_size: {type(array )}.""" )
def add_model_info_to_auto_map( auto_map , repo_id ):
    '''Prefix the entries of an `auto_map` with the given `repo_id`.'''
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""

    return auto_map
def infer_framework( model_class ):
    '''Infer the framework ("tf", "pt" or "flax") of a model class from its MRO.'''
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"""Could not infer framework from class {model_class}.""" )
| 48
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
A : str = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin ):
    """simple docstring"""

    tokenizer_class = '''AutoTokenizer'''
    attributes = ['''tokenizer''']

    preset_shape = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ) -> None:
        super().__init__(tokenizer )

        self.speaker_embeddings = speaker_embeddings
@classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path : str , speaker_embeddings_dict_path : str = "speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop("subfolder" , None ) , cache_dir=kwargs.pop("cache_dir" , None ) , force_download=kwargs.pop("force_download" , False ) , proxies=kwargs.pop("proxies" , None ) , resume_download=kwargs.pop("resume_download" , False ) , local_files_only=kwargs.pop("local_files_only" , False ) , use_auth_token=kwargs.pop("use_auth_token" , None ) , revision=kwargs.pop("revision" , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )

        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self , save_directory : str , speaker_embeddings_dict_path : str = "speaker_embeddings_path.json" , speaker_embeddings_directory : str = "speaker_embeddings" , push_to_hub : bool = False , **kwargs , ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , "v2" ) , exist_ok=True )

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"] , speaker_embeddings_directory , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , F'''{prompt_key}_{key}.npy''' )

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , "w" ) as fp:
                json.dump(embeddings_dict , fp )

        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self , voice_preset : str = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , None ) , cache_dir=kwargs.pop("cache_dir" , None ) , force_download=kwargs.pop("force_download" , False ) , proxies=kwargs.pop("proxies" , None ) , resume_download=kwargs.pop("resume_download" , False ) , local_files_only=kwargs.pop("local_files_only" , False ) , use_auth_token=kwargs.pop("use_auth_token" , None ) , revision=kwargs.pop("revision" , None ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )

            voice_preset_dict[key] = np.load(path )

        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )

            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith(".npz" ):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset )

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )

        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding="max_length" , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
| 140
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class SchedulerType(Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule( optimizer: Optimizer , last_epoch: int = -1 ):
    '''Create a schedule with a constant learning rate.'''
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )


def get_constant_schedule_with_warmup( optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    '''Create a constant schedule with a linear warmup phase.'''

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ):
    '''Create a schedule whose learning-rate multiplier follows piecewise-constant rules.'''
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(""":""" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )

    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
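# Example rule string for the piecewise-constant schedule above (optimizer is a
# hypothetical placeholder): multiplier 1.0 for steps < 1000, 0.1 for
# steps < 2000, and 0.01 afterwards.
#   scheduler = get_piecewise_constant_schedule(optimizer , step_rules="1000:1.0,2000:0.1,0.01" )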
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''Create a schedule that decays linearly after a linear warmup phase.'''

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ):
    '''Create a cosine-decay schedule with a linear warmup phase.'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )


def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ):
    '''Create a cosine schedule with hard restarts, after a linear warmup phase.'''

    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    '''Create a polynomial-decay schedule with a linear warmup phase.'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name: Union[str, SchedulerType] , optimizer: Optimizer , step_rules: Optional[str] = None , num_warmup_steps: Optional[int] = None , num_training_steps: Optional[int] = None , num_cycles: int = 1 , power: float = 1.0 , last_epoch: int = -1 , ):
    '''Unified API to get any scheduler from its name.'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )

    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
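# A minimal sketch of driving get_scheduler above; the model, learning rate and
# step counts are hypothetical placeholders.
#   import torch
#   optimizer = torch.optim.AdamW(model.parameters() , lr=1e-4 )
#   lr_scheduler = get_scheduler(
#       "cosine" , optimizer=optimizer , num_warmup_steps=500 , num_training_steps=10_000 )
#   for _ in range(10_000 ):
#       ...  # forward/backward and optimizer.step()
#       lr_scheduler.step()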
| 13
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
class FeatureExtractorUtilTest(unittest.TestCase ):
    '''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down(self ) -> None:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This check ensures we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self ) -> None:
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase ):
    '''simple docstring'''
    @classmethod
    def setUpClass(cls ) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass(cls ) -> None:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
    def test_push_to_hub(self ) -> None:
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-feature-extractor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''test-feature-extractor''' , push_to_hub=True , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization(self ) -> None:
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=True , use_auth_token=self._token )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor(self ) -> None:
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )

        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 303
|
def multiply( a , b ) -> int:
    '''Multiply a and b using only addition and bit shifts (Russian peasant multiplication).'''
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def multiply_mod( a , b , c ) -> int:
    '''Multiply a and b modulo c using only addition and bit shifts.'''
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
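# Sanity check for the helpers above: 6 * 7 halves b while doubling a,
# accumulating 6 + 12 + 24 = 42 on the odd steps of b = 7 -> 3 -> 1.
#   assert multiply(6 , 7 ) == 42
#   assert multiply_mod(6 , 7 , 5 ) == 42 % 5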
| 303
| 1
|
from itertools import product
def total_frequency_distribution( sides_number: int , dice_number: int ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1

    return totals_frequencies


def solution( ) -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'{solution() = }')
| 715
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig ):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self ):
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits

    def _cast_table(self , pa_table: pa.Table ):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table

    def _generate_tables(self , files ):
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
| 284
| 0
|
_lowerCamelCase : Tuple = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_lowerCamelCase : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_lowerCamelCase : Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_lowerCamelCase : List[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_lowerCamelCase : List[Any] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_lowerCamelCase : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_lowerCamelCase : Optional[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_lowerCamelCase : Optional[int] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 87
|
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser ) -> None:
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )

    def run( self ) -> dict:
        hub_version = huggingface_hub.__version__

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )

        return info

    @staticmethod
    def format_dict( d ) -> str:
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 621
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''Weighted directed graph edge.'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''Graph adjacency list for 0-1 breadth-first search.'''

    def __init__( self , size: int ):
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size

    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )

    @property
    def size( self ):
        return self._size

    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ):
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )

        self._graph[from_vertex].append(Edge(to_vertex , weight ) )

    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )

        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )

        return distances[finish_vertex]
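# A minimal sketch of the 0-1 BFS above on a hypothetical 3-vertex graph:
#   g = AdjacencyList(3 )
#   g.add_edge(0 , 1 , 0 )
#   g.add_edge(1 , 2 , 0 )
#   g.add_edge(0 , 2 , 1 )
#   g.get_shortest_path(0 , 2 )  # -> 0, via the zero-weight chain 0 -> 1 -> 2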
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption( parser ) -> None:
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary( terminalreporter ) -> None:
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 623
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase ):
    """simple docstring"""

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
    def test_stable_diffusion_2_non_square(self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )

        prompt = "."
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 81
|
def solution():
    """Return the product a*b*c of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 354
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # TRANSFORMERS_OFFLINE can only be changed before `transformers` is loaded,
        # so the actual check runs in an external python process
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
| 354
| 1
|
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via floating-point sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search, avoiding float error."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
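# Usage sketch (values chosen for illustration): perfect_square_binary_search(16)
# returns True, while perfect_square_binary_search(15) returns False.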
if __name__ == "__main__":
import doctest
doctest.testmod()
| 403
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    # alternate between the maximizing and minimizing player at each level
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 403
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 592
|
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways to fill a row of `length` units with
    red blocks of minimum length 3, separated by at least one black square."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
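# Sanity check (small case, hand-verifiable): solution(7) returns 17, matching
# the worked example given in the Project Euler problem statement.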
if __name__ == "__main__":
print(F"""{solution() = }""")
| 592
| 1
|
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of a given length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
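# Known reference point: there are exactly 120 reversible numbers below one
# thousand, so solution(3) should return 120 (stated in Project Euler 145).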
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }
            ),
            codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''],
            reference_urls=[
                '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
                '''https://en.wikipedia.org/wiki/METEOR''',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download('''wordnet''')
        if NLTK_VERSION >= version.Version('''3.6.5'''):
            nltk.download('''punkt''')
        if NLTK_VERSION >= version.Version('''3.6.6'''):
            nltk.download('''omw-1.4''')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('''3.6.5'''):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 184
| 0
|
"""simple docstring"""
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Project Euler 12: first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
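# Small worked example: 28 = 1+2+3+4+5+6+7 is triangular, and count_divisors(28)
# returns 6 (1, 2, 4, 7, 14, 28), matching the problem statement's example.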
if __name__ == "__main__":
print(solution())
| 635
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("""nielsr/rvlcdip-demo""")

        image = dataset["""train"""][0]["""image"""].convert("""RGB""")

        inputs = image_processor(image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 635
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 3_84
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_28
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, """use_cache"""):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, """encoder_seq_length""", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, """key_length""", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, """saved_model""", """1""")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["""encoder_hidden_states"""]
                    output_attentions = outputs["""encoder_attentions"""]
                else:
                    output_hidden_states = outputs["""hidden_states"""]
                    output_attentions = outputs["""attentions"""]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, """expected_num_hidden_layers""", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, """decoder_seq_length""", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, """encoder_seq_length""", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, """key_length""", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, """key_length""", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 7_68]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 612
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = """tapas"""

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_0_2_4,
        type_vocab_sizes=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0],
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=1_0.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=6_4,
        max_num_columns=3_2,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
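# Usage sketch (hypothetical values): TapasConfig(num_aggregation_labels=4,
# use_answer_as_supervision=True) mirrors a weakly supervised WTQ-style setup.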
| 236
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 5_1_2,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    """Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<pad>" , __UpperCamelCase="</s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<mask_2>" , __UpperCamelCase="<mask_1>" , __UpperCamelCase=None , __UpperCamelCase=103 , __UpperCamelCase = None , **__UpperCamelCase , ):
A_ = offset
if additional_special_tokens is not None:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError(
f'additional_special_tokens should be of type {type(__UpperCamelCase )}, but is'
f' {type(__UpperCamelCase )}' )
A_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(__UpperCamelCase ) , self.offset - 1 )
]
if len(set(__UpperCamelCase ) ) != len(__UpperCamelCase ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
A_ = additional_special_tokens_extended
else:
A_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , mask_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token_sent=__UpperCamelCase , offset=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
A_ = mask_token_sent
A_ = vocab_file
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
# add special tokens to encoder dict
A_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
A_ = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 608
|
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1) -> float:
    """Newton's method: iterate x <- x - f(x)/f'(x) with f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
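# Usage sketch: square_root_iterative(4) converges to 2.0, and
# square_root_iterative(3.2) approximates math.sqrt(3.2).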
if __name__ == "__main__":
from doctest import testmod
testmod()
| 608
| 1
|
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + """ """
        left = str(self.left or """""")
        right = str(self.right or """""")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into nodes with values <= value and nodes with values > value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must not exceed any value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=""",""")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("""Unknown command""")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. 'q' to quit. """)

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("""good by!""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 225
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."""
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 623
| 0
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f'{number} * {i} = {number * i}' for i in range(1, number_of_terms + 1)
    )
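# Example: multiplication_table(5, 3) returns "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15".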
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    """simple docstring"""
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101')
    else:
        raise ValueError('Model name should include either resnet50 or resnet101')
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = 'huggingface/label-files'
        filename = 'coco-detection-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
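# Illustrative call (not part of the script itself):
# config, is_panoptic = get_detr_config("detr-resnet-50")  # detection config with 91 labels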
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
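# Note: `create_rename_keys` only records (src, dest) name pairs; the tensors themselves are
# moved later by `rename_key`, one pop-and-reinsert per pair.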
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
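# Illustrative usage with hypothetical keys (not from the checkpoint):
# sd = {"old.key": torch.zeros(1)}
# rename_key(sd, "old.key", "new.key")  # sd now maps "new.key" to the same tensor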
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
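# Sizing note: DETR's hidden size is 256, so the fused in_proj weight is (768, 256) and the
# row slices [0:256], [256:512] and [512:768] above are the query, key and value projections.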
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f'Converting model {model_name}...')
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1E-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1E-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1E-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...')
        model.push_to_hub(f'nielsr/{model_name}')
        processor.push_to_hub(f'nielsr/{model_name}')
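# Illustrative invocation (script filename assumed, not given in the source):
# python convert_detr_to_pytorch.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50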
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
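# A minimal sanity check, assuming `is_prime`/`next_prime` as defined above:
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(14)
    print(next_prime(14))  # 17; pass desc=True to search downwards instead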
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __a ( _snake_case ):
__UpperCamelCase : str = 'esm'
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : str=None ,lowerCamelCase : Any=None ,lowerCamelCase : Union[str, Any]=768 ,lowerCamelCase : Tuple=12 ,lowerCamelCase : int=12 ,lowerCamelCase : Optional[int]=3072 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : Any=1026 ,lowerCamelCase : str=0.02 ,lowerCamelCase : int=1E-1_2 ,lowerCamelCase : Union[str, Any]="absolute" ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : str=None ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : int=False ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any=None ,**lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,mask_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = emb_layer_norm_before
__SCREAMING_SNAKE_CASE = token_dropout
__SCREAMING_SNAKE_CASE = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__SCREAMING_SNAKE_CASE = EsmFoldConfig()
elif isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = EsmFoldConfig(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__SCREAMING_SNAKE_CASE = get_default_vocab_list()
else:
__SCREAMING_SNAKE_CASE = vocab_list
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"""use_esm_attn_map""" ,lowerCamelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().to_dict()
if isinstance(self.esmfold_config ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = self.esmfold_config.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : str = None
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : float = 0
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : int = 128
__UpperCamelCase : "TrunkConfig" = None
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if self.trunk is None:
__SCREAMING_SNAKE_CASE = TrunkConfig()
elif isinstance(self.trunk ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.trunk.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 48
__UpperCamelCase : int = 1024
__UpperCamelCase : int = 128
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : float = 0
__UpperCamelCase : float = 0
__UpperCamelCase : bool = False
__UpperCamelCase : int = 4
__UpperCamelCase : Optional[int] = 128
__UpperCamelCase : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self.structure_module is None:
__SCREAMING_SNAKE_CASE = StructureModuleConfig()
elif isinstance(self.structure_module ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
__SCREAMING_SNAKE_CASE = self.sequence_state_dim // self.sequence_head_width
__SCREAMING_SNAKE_CASE = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.structure_module.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 384
__UpperCamelCase : int = 128
__UpperCamelCase : int = 16
__UpperCamelCase : int = 128
__UpperCamelCase : int = 12
__UpperCamelCase : int = 4
__UpperCamelCase : int = 8
__UpperCamelCase : float = 0.1
__UpperCamelCase : int = 8
__UpperCamelCase : int = 1
__UpperCamelCase : int = 2
__UpperCamelCase : int = 7
__UpperCamelCase : int = 10
__UpperCamelCase : float = 1E-8
__UpperCamelCase : float = 1E5
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return asdict(self )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
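# Note: the tuple above is the 33-token ESM-2 alphabet — four special tokens, the amino-acid
# and ambiguity codes, '.', '-', <null_1> and <mask>.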
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Optional[int] = """owlvit_text_model"""
def __init__( self : Optional[int] , __lowerCamelCase : List[Any]=49_408 , __lowerCamelCase : Dict=512 , __lowerCamelCase : List[str]=2_048 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Union[str, Any]=8 , __lowerCamelCase : int=16 , __lowerCamelCase : Optional[int]="quick_gelu" , __lowerCamelCase : str=1E-5 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Optional[Any]=1.0 , __lowerCamelCase : Dict=0 , __lowerCamelCase : Tuple=49_406 , __lowerCamelCase : List[Any]=49_407 , **__lowerCamelCase : int , ):
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase :Optional[Any] = vocab_size
UpperCamelCase :Union[str, Any] = hidden_size
UpperCamelCase :str = intermediate_size
UpperCamelCase :int = num_hidden_layers
UpperCamelCase :Optional[Any] = num_attention_heads
UpperCamelCase :Dict = max_position_embeddings
UpperCamelCase :Optional[int] = hidden_act
UpperCamelCase :int = layer_norm_eps
UpperCamelCase :Union[str, Any] = attention_dropout
UpperCamelCase :Any = initializer_range
UpperCamelCase :int = initializer_factor
@classmethod
def _A ( cls : List[str] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[Any] ):
cls._set_token_in_kwargs(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :Optional[int] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
UpperCamelCase :Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : int = """owlvit_vision_model"""
def __init__( self : str , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Dict=3_072 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=768 , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Optional[int]="quick_gelu" , __lowerCamelCase : Any=1E-5 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Any=0.02 , __lowerCamelCase : List[str]=1.0 , **__lowerCamelCase : str , ):
super().__init__(**__lowerCamelCase )
UpperCamelCase :Optional[int] = hidden_size
UpperCamelCase :Any = intermediate_size
UpperCamelCase :Any = num_hidden_layers
UpperCamelCase :List[str] = num_attention_heads
UpperCamelCase :List[Any] = num_channels
UpperCamelCase :int = image_size
UpperCamelCase :Optional[int] = patch_size
UpperCamelCase :List[Any] = hidden_act
UpperCamelCase :List[str] = layer_norm_eps
UpperCamelCase :Optional[Any] = attention_dropout
UpperCamelCase :str = initializer_range
UpperCamelCase :Any = initializer_factor
@classmethod
def _A ( cls : str , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[Any] ):
cls._set_token_in_kwargs(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :int = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
UpperCamelCase :Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : List[Any] = """owlvit"""
snake_case__ : Any = True
def __init__( self : List[Any] , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Optional[Any]=2.6592 , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : int , ):
super().__init__(**__lowerCamelCase )
if text_config is None:
UpperCamelCase :int = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
UpperCamelCase :Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
UpperCamelCase :Tuple = OwlViTTextConfig(**__lowerCamelCase )
UpperCamelCase :Optional[Any] = OwlViTVisionConfig(**__lowerCamelCase )
UpperCamelCase :Optional[Any] = projection_dim
UpperCamelCase :Union[str, Any] = logit_scale_init_value
UpperCamelCase :List[Any] = return_dict
UpperCamelCase :List[Any] = 1.0
@classmethod
def _A ( cls : Optional[int] , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Optional[int] ):
cls._set_token_in_kwargs(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :Any = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _A ( cls : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict , **__lowerCamelCase : int ):
UpperCamelCase :List[str] = {}
UpperCamelCase :Optional[int] = text_config
UpperCamelCase :List[str] = vision_config
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
def _A ( self : str ):
UpperCamelCase :Optional[int] = copy.deepcopy(self.__dict__ )
UpperCamelCase :int = self.text_config.to_dict()
UpperCamelCase :int = self.vision_config.to_dict()
UpperCamelCase :int = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( _a ):
@property
def _A ( self : int ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _A ( self : Dict ):
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _A ( self : int ):
return 1E-4
def _A ( self : Dict , __lowerCamelCase : "ProcessorMixin" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : Optional["TensorType"] = None , ):
UpperCamelCase :Union[str, Any] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , framework=__lowerCamelCase )
UpperCamelCase :Optional[Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__lowerCamelCase , framework=__lowerCamelCase )
return {**text_input_dict, **image_input_dict}
@property
def _A ( self : Dict ):
return 14
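# Note: the ONNX config above exports both modalities at once — `generate_dummy_inputs` builds
# dummy text inputs with the tokenizer and dummy pixel values with the image processor, then
# merges them into a single feed dict.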
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
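# The loop above swaps two uniformly random positions, which is not the textbook
# Fisher-Yates procedure. A minimal sketch of the classic in-place variant, assuming
# the same `random` import:
def true_fisher_yates_shuffle(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # choose a partner from the not-yet-fixed prefix [0, i]
        data[i], data[j] = data[j], data[i]
    return data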
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    '''simple docstring'''
    return EnvironmentCommand()
def download_command_factory(args):
    '''simple docstring'''
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('''env''')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '''--accelerate-config_file''', default=None, help='''The accelerate config file to use for the default values in the launching script.''', )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''') is not None:
            import safetensors
            safetensors_version = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                '''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"""\t{accelerate_config}"""
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU'''))
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': f"""{safetensors_version}""",
            '''Accelerate version''': f"""{accelerate_version}""",
            '''Accelerate config''': f"""{accelerate_config_str}""",
            '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
            '''Tensorflow version (GPU?)''': f"""{tf_version} ({tf_cuda_available})""",
            '''Flax version (CPU?/GPU?/TPU?)''': f"""{flax_version} ({jax_backend})""",
            '''Jax version''': f"""{jax_version}""",
            '''JaxLib version''': f"""{jaxlib_version}""",
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''')
        print(self.format_dict(info))
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
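# Usage note: this command backs `transformers-cli env`; the printed block is meant to be
# pasted into GitHub issues when reporting bugs.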
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
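# Illustrative example (function name as defined above):
# maximum_non_adjacent_sum([3, 2, 7, 10]) -> 13  (takes 3 and 10, skipping neighbours)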
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """simple docstring"""
    filename = tmp_path / """file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename, """w""") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """simple docstring"""
    filename = tmp_path / """malformed_file.csv"""
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename, """w""") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """simple docstring"""
    filename = tmp_path / """csv_with_image.csv"""
    data = textwrap.dedent(
        F'''\
        image
        {image_file}
        ''' )
    with open(filename, """w""") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """simple docstring"""
    filename = tmp_path / """csv_with_label.csv"""
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename, """w""") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """simple docstring"""
    filename = tmp_path / """csv_with_int_list.csv"""
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename, """w""") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """simple docstring"""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="""Error tokenizing data"""):
        for _ in generator:
            pass
    assert any(
        record.levelname == """ERROR"""
        and """Failed to read file""" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """simple docstring"""
    with open(csv_file_with_image, encoding="""utf-8""") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="""utf-8""", features=Features({"""image""": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("""image""").type == Image()()
    generated_content = pa_table.to_pydict()["""image"""]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """simple docstring"""
    with open(csv_file_with_label, encoding="""utf-8""") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="""utf-8""", features=Features({"""label""": ClassLabel(names=["""good""", """bad"""])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("""label""").type == ClassLabel(names=["""good""", """bad"""])()
    generated_content = pa_table.to_pydict()["""label"""]
    assert generated_content == [ClassLabel(names=["""good""", """bad"""]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """simple docstring"""
    csv = Csv(encoding="""utf-8""", sep=""",""", converters={"""int_list""": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("""int_list""").type)
    generated_content = pa_table.to_pydict()["""int_list"""]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
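# Note: `converters` is forwarded to `pandas.read_csv`, so the lambda above parses each
# whitespace-separated cell into a Python list before Arrow infers the list type.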
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = """3"""  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 10**n
    number = 2_8433 * (pow(2, 783_0457, modulus)) + 1
    return str(number % modulus)
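# Note: `pow(2, 7830457, modulus)` performs modular exponentiation directly, so only the
# last n digits of 28433 * 2**7830457 + 1 are ever materialised.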
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def rename_keys(name):
    '''simple docstring'''
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    '''simple docstring'''
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
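# Note: MusicGen's fused `in_proj_weight` stacks the q/k/v projections row-wise, so the three
# `hidden_size`-row slices above become the separate q_proj/k_proj/v_proj entries that the
# Hugging Face attention modules expect.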
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
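# Note: all three sizes keep head_dim = hidden_size / num_attention_heads = 64
# (1024/16, 1536/24, 2048/32).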
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu") -> Dict:
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""")
    if len(unexpected_keys) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict ):
a__ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "num_encoder_blocks" ) )
class A__ :
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple=13 , lowerCamelCase__ : List[str]=64 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[Any]=[2, 2, 2, 2] , lowerCamelCase__ : str=[8, 4, 2, 1] , lowerCamelCase__ : Union[str, Any]=[16, 32, 64, 128] , lowerCamelCase__ : int=[1, 4, 8, 16] , lowerCamelCase__ : List[Any]=[1, 2, 4, 8] , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Any=0.02 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Optional[Any]=None , ):
a__ : List[str] = parent
a__ : Any = batch_size
a__ : Union[str, Any] = image_size
a__ : Optional[int] = num_channels
a__ : Union[str, Any] = num_encoder_blocks
a__ : List[str] = sr_ratios
a__ : str = depths
a__ : Any = hidden_sizes
a__ : Dict = downsampling_rates
a__ : Union[str, Any] = num_attention_heads
a__ : Optional[Any] = is_training
a__ : Tuple = use_labels
a__ : Optional[int] = hidden_act
a__ : List[Any] = hidden_dropout_prob
a__ : Optional[int] = attention_probs_dropout_prob
a__ : Union[str, Any] = initializer_range
a__ : Any = num_labels
a__ : Any = scope
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : int = None
if self.use_labels:
a__ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
a__ : List[Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase( self : List[str] ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] ):
a__ : Optional[Any] = SegformerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : Union[str, Any] = model(lowerCamelCase__ )
a__ : List[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
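    # Shape note: the last hidden state is downsampled by downsampling_rates[-1] * 2 per side,
    # so with image_size=64 and rates [1, 4, 8, 16] the expected spatial size is 64 // 32 = 2.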
def _UpperCamelCase( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ):
a__ : int = self.num_labels
a__ : Union[str, Any] = SegformerForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : str = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
a__ : Any = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] ):
a__ : Dict = 1
a__ : Dict = SegformerForSemanticSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
a__ : List[str] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase__ )
a__ : Dict = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertGreater(result.loss , 0.0 )
def _UpperCamelCase( self : int ):
a__ : Any = self.prepare_config_and_inputs()
a__, a__, a__ : List[str] = config_and_inputs
a__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowercase = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = True
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[Any] = SegformerModelTester(self )
a__ : List[str] = SegformerConfigTester(self , config_class=lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _UpperCamelCase( self : List[Any] ):
a__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] ):
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase__ )
def _UpperCamelCase( self : str ):
a__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase__ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def _UpperCamelCase( self : Any ):
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def _UpperCamelCase( self : Tuple ):
pass
def _UpperCamelCase( self : List[str] ):
a__, a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : int = model_class(lowerCamelCase__ )
a__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : List[str] = [*signature.parameters.keys()]
a__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_training_gradient_checkpointing(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
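# A hedged sketch of the attention-shape arithmetic the tests above assert; the
# concrete values below are illustrative assumptions, not taken from the test config.
image_size = 512
sr_ratios = [8, 4, 2, 1]  # hypothetical per-block spatial-reduction ratios
# queries at stage 1: one token per 4x4 patch
expected_seq_len = (image_size // 4) ** 2
# keys/values at stage 1 are spatially reduced by sr_ratios[0] in each dimension
expected_reduced_seq_len = (image_size // (4 * sr_ratios[0])) ** 2
assert expected_seq_len == 16384 and expected_reduced_seq_len == 256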
| 151
|
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit.

    >>> remove_digit(152)
    52
    >>> remove_digit(725)
    75
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # one copy of the digit list per position, each with that position removed
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
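# Quick check of remove_digit above, worked out by hand:
# 2736 -> candidates 736, 236, 276, 273 -> 736; the sign is dropped via abs().
assert remove_digit(2736) == 736
assert remove_digit(-152) == 52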
| 151
| 1
|
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
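# A minimal sketch of how such a dependency table is typically consumed in a
# setup.py (the helper name `deps_list` is an assumption, not shown in this file):
def deps_list(*pkgs):
    """Map bare package names to their pinned requirement strings."""
    return [deps[pkg] for pkg in pkgs]


# e.g. extras["quality"] = deps_list("black", "isort", "ruff")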
| 691
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary.
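# A self-contained sketch of the FAISS inner-product lookup the dummy dataset above
# relies on (illustrative values; assumes `datasets` and `faiss` are installed):
import numpy as np
from datasets import Dataset
import faiss

ds = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "title": ["Foo", "Bar"],
        "embeddings": [np.ones(8), 2 * np.ones(8)],
    }
)
ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = ds.get_nearest_examples("embeddings", np.ones(8, dtype=np.float32), k=1)
# with inner-product scoring the all-twos vector wins: examples["id"] == ["1"]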
| 691
| 1
|
"""Quickselect: find the kth smallest element of a list in expected linear time."""
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element of lst (1-indexed).

    Note: elements equal to the pivot are collapsed, so the input is assumed
    to contain distinct values.

    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    """
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
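# Worked example of the recursion above with k = 2 on [7, 4, 9, 1]:
# if the pivot drawn is 7, then small = [4, 1] and big = [9];
# len(small) = 2 > k - 1 = 1, so we recurse into small with the same k,
# where pivot 4 gives small = [1] and len(small) == k - 1, returning 4.
assert kth_number([7, 4, 9, 1], 2) == 4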
| 705
|
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
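# A minimal sketch of the lazy-module pattern used above: the module in sys.modules
# is swapped for a proxy that imports submodules only on first attribute access.
# (This is an illustrative stand-in for _LazyModule, not its actual implementation.)
import importlib
import types


class LazyProxyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value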
| 411
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
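# Hedged usage sketch of the config above (values illustrative; the class follows
# transformers' PretrainedConfig conventions, so attribute_map aliases also resolve):
config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
assert config.hidden_size == 128  # aliased to n_embd via attribute_map
assert config.multi_query         # MQA: one shared key/value head across query heads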
| 653
| 1
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
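# A self-contained sketch of the async stdout/stderr "tee" technique implemented above
# (it mirrors _stream_subprocess in miniature rather than reusing it):
import asyncio


async def tee_run(cmd):
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )

    async def pump(stream, label):
        while True:
            line = await stream.readline()
            if not line:
                break
            print(label, line.decode().rstrip())  # stream output live instead of buffering

    await asyncio.gather(pump(proc.stdout, "stdout:"), pump(proc.stderr, "stderr:"))
    return await proc.wait()


# asyncio.run(tee_run(["python", "-c", "print('hello')"]))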
| 372
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
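# A hedged sketch of the "drop kwargs that still equal their declared defaults" pruning
# used in pd_read_csv_kwargs above, reduced to plain dataclasses (names illustrative):
import dataclasses


@dataclasses.dataclass
class MiniCfg:
    sep: str = ","
    prefix: str = None  # deprecated in newer pandas


def pruned_kwargs(cfg: MiniCfg, deprecated=("prefix",)) -> dict:
    kwargs = dataclasses.asdict(cfg)
    defaults = {f.name: f.default for f in dataclasses.fields(MiniCfg)}
    for name in deprecated:
        if kwargs[name] == defaults[name]:
            del kwargs[name]  # never forward a deprecated param the user didn't set
    return kwargs


assert pruned_kwargs(MiniCfg()) == {"sep": ","}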
| 372
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk"
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney"
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB"
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
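# A hedged sketch of the 3-way chunk selection behind _random_mel_fusion above,
# with illustrative values (not tied to any real checkpoint):
import numpy as np

total_frames, chunk_frames = 1_001, 480
# all valid window starts, split into a front, middle, and back third
ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
idx_front, idx_middle, idx_back = (np.random.choice(r) for r in ranges)
# each index selects one chunk_frames-long mel window from its third; the three
# crops are stacked with a globally downsampled copy to form the 4-channel fusion.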
| 27
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 78
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    """Rename original SegFormer state dict keys to the Transformers naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split the fused key/value matrices into separate key and value weights."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    """Load the standard COCO test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original SegFormer weights into our SegFormer structure."""
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 2_56
elif size == "b2":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 4, 18, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 8, 27, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE__ = [64, 1_28, 3_20, 5_12]
SCREAMING_SNAKE_CASE__ = 7_68
SCREAMING_SNAKE_CASE__ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
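A quick way to sanity-check the renaming logic above without downloading a real checkpoint is to run `rename_keys` over a toy state dict (the keys below are illustrative, not taken from an actual SegFormer checkpoint):

from collections import OrderedDict

import torch

toy_state_dict = OrderedDict(
    {
        "backbone.patch_embed1.proj.weight": torch.zeros(1),
        "backbone.block1.0.attn.q.weight": torch.zeros(1),
        "decode_head.linear_c4.proj.weight": torch.zeros(1),
    }
)
for key in rename_keys(toy_state_dict):
    print(key)
# segformer.encoder.patch_embeddings.0.proj.weight
# segformer.encoder.block.0.0.attention.self.query.weight
# decode_head.linear_c.3.proj.weight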
| 472
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def time_input_dim(self):
return 32
@property
    def block_out_channels_0(self):
return self.time_input_dim
@property
    def time_embed_dim(self):
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
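For reference, the same two-stage prior + img2img flow outside of a test harness might look like this (model ids and URLs copied from the test above; the prompt, strength, and use of a CUDA device are illustrative assumptions):

import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
# The prior turns the text prompt into image embeddings for the decoder.
image_emb, zero_image_emb = pipe_prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
image = pipe(
    "A red cartoon frog, 4k",
    image=init_image,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    strength=0.3,
).images[0]
image.save("frog.png")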
| 472
| 1
|
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
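A minimal usage sketch for the function above (the concentrations are illustrative numbers, chosen so the mass-action law n_i**2 = n * p is easy to check by hand):

# One of the three arguments must be zero; the function solves for it.
label, value = carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
print(label, value)  # intrinsic_conc 50.0  (sqrt(25 * 100))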
| 591
|
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
_SCREAMING_SNAKE_CASE = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
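Outside the test harness, the feature extractor can be used directly on raw HTML (the markup below is an arbitrary illustration; bs4 must be installed):

from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor("<html><body><h1>Hello</h1><p>World</p></body></html>")
# Expected along the lines of the batched case above:
print(encoding.nodes)   # [['Hello', 'World']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]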
| 591
| 1
|
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 374
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed intent of the stray '3': silence TensorFlow's console noise
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 374
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (old, new) key pairs mapping DiT weights to BEiT naming."""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split the fused qkv matrix into separate query, key and value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Load the standard COCO test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into our BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
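The converter is normally driven through the CLI flags defined above, but the same call can be made from Python; the checkpoint URL below is the script's own default, while the dump path is illustrative:

convert_dit_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
    pytorch_dump_folder_path="./dit-base",
    push_to_hub=False,
)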
| 48
|
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version constant and the version table in custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
update_custom_js(args.version)
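For reference, the function assumes `custom.js` contains a stable-version constant followed by a version table, roughly shaped like this (the JS content is an illustration, shown as Python comments):

# const stableVersion = "v4.30.0"
# const versionMapping = {
#     "": "v4.30.0 (stable)",
#     "v4.29.0": "v4.29.0",
# }
#
# update_custom_js("4.31.0") rewrites the constant and appends
# '"v4.31.0": "v4.31.0",' just before the closing brace.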
| 362
| 0
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Generate a k_size x k_size Gaussian kernel with the given sigma."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur to a grayscale image via im2col and a dot product."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
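The filter also runs on plain NumPy arrays, which makes it easy to try without OpenCV or an image file (the synthetic array is illustrative):

import numpy as np

synthetic = np.arange(100, dtype=np.float64).reshape(10, 10)
blurred = gaussian_filter(synthetic, 3, sigma=1)
print(blurred.shape)  # (8, 8): a valid convolution shrinks each side by k_size - 1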
| 412
|
def speed_of_sound(density: float, bulk_modulus: float) -> float:
    """Compute the speed of sound in a fluid from its density and bulk modulus."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
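A worked example, using handbook-style values for water (treat the exact numbers as illustrative): a bulk modulus of about 2.15e9 Pa and a density of about 998 kg/m^3 give roughly 1468 m/s.

print(speed_of_sound(density=998, bulk_modulus=2.15e9))  # ~1467.7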
| 412
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
snake_case = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase_ ( lowerCAmelCase__="no" , lowerCAmelCase__ = default_json_config_file , lowerCAmelCase__ = False ):
"""simple docstring"""
_lowerCAmelCase : Any = Path(lowerCAmelCase__ )
path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_lowerCAmelCase : Union[str, Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_lowerCAmelCase : Tuple = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
_lowerCAmelCase : str = torch.cuda.device_count()
_lowerCAmelCase : Union[str, Any] = num_gpus
_lowerCAmelCase : List[str] = False
if num_gpus > 1:
_lowerCAmelCase : Any = "MULTI_GPU"
else:
_lowerCAmelCase : str = "NO"
elif is_xpu_available() and use_xpu:
_lowerCAmelCase : Any = torch.xpu.device_count()
_lowerCAmelCase : Union[str, Any] = num_xpus
_lowerCAmelCase : str = False
if num_xpus > 1:
_lowerCAmelCase : str = "MULTI_XPU"
else:
_lowerCAmelCase : int = "NO"
elif is_npu_available():
_lowerCAmelCase : List[Any] = torch.npu.device_count()
_lowerCAmelCase : Optional[int] = num_npus
_lowerCAmelCase : Optional[Any] = False
if num_npus > 1:
_lowerCAmelCase : int = "MULTI_NPU"
else:
_lowerCAmelCase : Tuple = "NO"
else:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Union[str, Any] = "NO"
_lowerCAmelCase : Union[str, Any] = ClusterConfig(**lowerCAmelCase__ )
config.to_json_file(lowerCAmelCase__ )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
| 424
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 424
| 1
|
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation; assumes b is a non-negative integer."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a ** b for integer exponents, including negative ones."""
    if b < 0:
        # a ** b == 1 / a ** (-b) for negative exponents
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
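

# Illustrative checks (assumed values, easy to verify by hand): the recurrence
# a**b = (a**(b//2))**2 * a**(b % 2) halves the exponent each step, so only
# O(log b) multiplications are needed.
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(5, 0) == 1
    assert power(-2, -3) == -0.125  # 1 / (-2) ** 3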
| 389
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """Transformer that predicts a CLIP image embedding from a CLIP text embedding."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: the upper triangle is -10000.0 so each position
        # attends only to itself and earlier positions.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors in the model, keyed by their module path."""
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor):
        """Set the attention processor(s): a single processor for all layers, or a dict keyed by module path."""
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize prior latents back to the CLIP embedding scale."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
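

# Illustrative usage sketch (toy sizes chosen here for brevity, not a definitive
# recipe). With num_attention_heads=2 and attention_head_dim=8, inner_dim is 16;
# num_embeddings=4 text tokens plus the 4 additional embeddings (encoder states,
# projected embedding, time embedding, prd token) fill the positional table.
#
#     model = PriorTransformer(num_attention_heads=2, attention_head_dim=8,
#                              num_layers=2, embedding_dim=16,
#                              num_embeddings=4, additional_embeddings=4)
#     latents = torch.randn(1, 16)   # current noisy image embedding
#     proj = torch.randn(1, 16)      # pooled CLIP text embedding
#     enc = torch.randn(1, 4, 16)    # per-token CLIP text states
#     out = model(latents, timestep=1, proj_embedding=proj,
#                 encoder_hidden_states=enc)
#     out.predicted_image_embedding.shape  # expected: torch.Size([1, 16])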
| 389
| 1
|
"""simple docstring"""
from __future__ import annotations
import requests
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ):
lowerCAmelCase = F'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(_UpperCAmelCase ).json()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
lowerCAmelCase = requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 10 ):
lowerCAmelCase = hackernews_top_stories(_UpperCAmelCase )
return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
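

# Illustrative output (hypothetical titles and URLs -- the real front page
# changes constantly, and each story dict must contain 'title' and 'url' keys):
#
#     * [Example story title](https://example.com/post)
#     * [Another front-page item](https://example.org/article)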
| 4
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__UpperCamelCase : Union[str, Any] = '''examples/'''
__UpperCamelCase : str = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__UpperCamelCase : List[str] = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__UpperCamelCase : Optional[int] = '''README.md'''
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase ,lowerCAmelCase = REPLACE_PATTERNS[pattern]
lowerCAmelCase = replace.replace('VERSION' , _UpperCAmelCase )
lowerCAmelCase = re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ):
for folder, directories, fnames in os.walk(_UpperCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern='examples' )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if not patch:
update_version_in_examples(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = '🤗 Transformers currently provides the following architectures'
lowerCAmelCase = '1. Want to contribute a new model?'
with open(_UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase = f.readlines()
# Find the start of the list.
lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lowerCAmelCase = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ():
with open(REPLACE_FILES['init'] , 'r' ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase = REPLACE_PATTERNS['init'][0].search(_UpperCAmelCase ).groups()[0]
return packaging.version.parse(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Tuple=False ):
lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
lowerCAmelCase = default_version.base_version
elif patch:
lowerCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
lowerCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
lowerCAmelCase = input(F'Which version are you releasing? [{default_version}]' )
if len(_UpperCAmelCase ) == 0:
lowerCAmelCase = default_version
print(F'Updating version to {version}.' )
global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = get_version()
lowerCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase = input(F'Which version are we developing now? [{dev_version}]' )
if len(_UpperCAmelCase ) == 0:
lowerCAmelCase = dev_version
print(F'Updating version to {version}.' )
global_version_update(_UpperCAmelCase )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
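

# Illustrative check (hypothetical snippet): the "init" pattern swaps the
# version string in place while leaving the rest of the line intact.
#
#     >>> pattern, replace = REPLACE_PATTERNS["init"]
#     >>> pattern.sub(replace.replace("VERSION", "4.30.0"), '__version__ = "4.29.0.dev0"\n')
#     '__version__ = "4.30.0"\n'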
| 4
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of UNet1DModel: the hidden states from the last layer of the model."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            output_channel = prev_output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
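

# Illustrative usage sketch (assumed toy sizes): the 1-D UNet maps a tensor of
# shape (batch, channels, length) through matched down and up blocks, so the
# output should keep the input shape when out_channels == in_channels.
#
#     model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2,
#                         block_out_channels=(32, 32, 64))
#     sample = torch.randn(1, 2, 256)
#     out = model(sample, timestep=10)
#     out.sample.shape  # expected: torch.Size([1, 2, 256])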
| 720
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
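

# Illustrative usage (hypothetical session): the _LazyModule registered above
# resolves names on first attribute access, so importing the package stays
# cheap until a class is actually touched.
#
#     from transformers.models.conditional_detr import ConditionalDetrConfig
#     config = ConditionalDetrConfig()  # triggers the real submodule import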
| 677
| 0
|