| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 458
|
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Element-wise ReLU activation: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
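    # Usage sketch (added for illustration, not part of the original script):
    # np.maximum broadcasts, so the same relu applies element-wise to
    # multi-dimensional inputs as well.
    print(relu(np.array([[-2.5, 3.0], [0.0, -0.1]])))  # negatives become 0.0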
| 458
| 1
|
import argparse
import os

import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
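# For illustration (added, not in the original script): rreplace replaces the
# *last* `occurrence` matches, so rreplace("blocks.0.w", ".w", ".weight", 1)
# returns "blocks.0.weight" while earlier ".w" substrings stay untouched.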
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 710
|
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 306
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , SCREAMING_SNAKE_CASE__ , )
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 223
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase__ : Optional[Any] = {'UserAgent': UserAgent().random}
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = script.contents[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = F'''https://www.instagram.com/{username}/'''
SCREAMING_SNAKE_CASE__ : List[str] = self.get_json()
def __magic_name__ (self ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = requests.get(self.url , headers=SCREAMING_SNAKE_CASE__ ).text
SCREAMING_SNAKE_CASE__ : List[str] = BeautifulSoup(SCREAMING_SNAKE_CASE__ , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ) -> str:
"""simple docstring"""
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__(self ) -> str:
"""simple docstring"""
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def __magic_name__ (self ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def __magic_name__ (self ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowercase_ ( _snake_case = "github" ):
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE__ : List[Any] = InstagramUser(_snake_case )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data ,_snake_case )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : List[str] = InstagramUser('github')
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 223
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708
|
def dodecahedron_surface_area(edge: float) -> float:
    """Calculates the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Calculates the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264
| 0
|
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
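# Round-trip sanity check (added for illustration, not part of the original
# pipeline): encoding to {-1, 1} bit planes and decoding back should only lose
# the sub-1/255 quantization error introduced by the int cast above, e.g.
#
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x, atol=1 / 255)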
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM reverse step, with the predicted x_0 clamped to +/- bit_scale."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM reverse step, with the predicted x_0 clamped to +/- bit_scale."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
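# Note (added): the two step functions above mirror the stock
# DDIMScheduler.step / DDPMScheduler.step from diffusers; the substantive
# change is the clipping step, where the predicted x_0 is clamped to
# [-bit_scale, bit_scale] rather than [-1, 1], matching the analog-bits
# representation produced by decimal_to_bits.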
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 22
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
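# Complexity note (added): with one bucket per integer step in the value range
# and Timsort inside each bucket, this uses O(n + k) extra space for k buckets
# and O(n log n) time in the worst case where most values share one bucket.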
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 540
| 0
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
snake_case__ : Dict = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 707
|
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 618
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase_ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCAmelCase_ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCAmelCase_ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 326
|
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
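# Why these multipliers (added explanation): for a 4-digit base n (so 2n has
# 5 digits), the concatenated product n | 2n equals n * 10**5 + 2 * n
# = 100002 * n. Likewise, for a 3-digit base n (2n and 3n are 3 digits each),
# n | 2n | 3n equals n * 10**6 + 2 * n * 10**3 + 3 * n = 1002003 * n.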
if __name__ == "__main__":
print(F"""{solution() = }""")
| 326
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
SCREAMING_SNAKE_CASE__ = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def lowerCAmelCase__ ( ) -> Optional[int]:
"""simple docstring"""
snake_case = Github(os.environ['GITHUB_TOKEN'] )
snake_case = g.get_repo('huggingface/diffusers' )
snake_case = repo.get_issues(state='open' )
for issue in open_issues:
snake_case = sorted(issue.get_comments() , key=lambda _UpperCamelCase : i.created_at , reverse=_snake_case )
snake_case = comments[0] if len(_snake_case ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 711
|
"""simple docstring"""
import os
import string
import sys
SCREAMING_SNAKE_CASE__ = 1 << 8
SCREAMING_SNAKE_CASE__ = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
SCREAMING_SNAKE_CASE__ = KEYMAP["up"]
SCREAMING_SNAKE_CASE__ = KEYMAP["left"]
if sys.platform == "win32":
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
SCREAMING_SNAKE_CASE__ = ord(str(i))
def lowerCAmelCase__ ( ) -> List[Any]:
"""simple docstring"""
if os.name == "nt":
import msvcrt
snake_case = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_UpperCamelCase ) == 0:
# Read the keystroke
snake_case = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(_UpperCamelCase )
if ord(_UpperCamelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
snake_case = chr(KEYMAP['esc'] )
except KeyError:
snake_case = cha[1]
else:
snake_case = ch.decode(_UpperCamelCase )
else:
snake_case = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case = sys.stdin.fileno()
snake_case = termios.tcgetattr(_UpperCamelCase )
try:
tty.setraw(_UpperCamelCase )
snake_case = sys.stdin.read(1 )
finally:
termios.tcsetattr(_UpperCamelCase , termios.TCSADRAIN , _UpperCamelCase )
return ch
def lowerCAmelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case = get_raw_chars()
if ord(_UpperCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_UpperCamelCase ) == KEYMAP["esc"]:
snake_case = get_raw_chars()
if ord(_UpperCamelCase ) == KEYMAP["mod_int"]:
snake_case = get_raw_chars()
if ord(_UpperCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_UpperCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_UpperCamelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 104
| 0
|
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    test_failures[i][0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 463
|
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
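# Example (added for illustration): a gin line such as `NUM_HEADS = 12` is
# captured by the regex above and, via GIN_TO_CONFIG_MAPPING, becomes
# args["num_heads"] = 12; values containing a "." are parsed as floats instead.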
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 617
| 0
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 218
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 218
| 1
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
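# Complexity note (added): pigeonhole sort runs in O(n + R) time and O(R)
# extra space, where R = max - min + 1, so it only pays off when the value
# range R is not much larger than the number of elements n.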
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 35
|
"""simple docstring"""
def __snake_case ( ) -> Union[str, Any]:
lowercase : str = 0
for i in range(1 ,1001 ):
total += i**i
return str(__A )[-10:]
if __name__ == "__main__":
print(solution())
| 607
| 0
|
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
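# Note (added): the two-pointer scan above assumes `nums` is sorted in
# ascending order and returns indices into that sorted list, e.g.
# two_pointer([2, 7, 11, 15], 9) == [0, 1].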
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 716
|
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174
| 0
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
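# Read below as MODEL_TYPE (which answer generator to load; "bart" selects the
# BART ELI5 model) and LOAD_DENSE_INDEX (whether to load the dense retriever).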
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
UpperCAmelCase__ = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = RealmTokenizer
def __init__( self : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Any="[UNK]" , __UpperCAmelCase : Dict="[SEP]" , __UpperCAmelCase : List[str]="[PAD]" , __UpperCAmelCase : str="[CLS]" , __UpperCAmelCase : Optional[int]="[MASK]" , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=None , **__UpperCAmelCase : str , ) ->Optional[int]:
"""simple docstring"""
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars
):
a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) )
a = do_lower_case
a = strip_accents
a = tokenize_chinese_chars
a = normalizer_class(**__UpperCAmelCase )
a = do_lower_case
def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) ->List[Any]:
"""simple docstring"""
a = PaddingStrategy.MAX_LENGTH
a = text
a = kwargs.pop('''text_pair''' , __UpperCAmelCase )
a = kwargs.pop('''return_tensors''' , __UpperCAmelCase )
a = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(__UpperCAmelCase ):
if batch_text_pair is not None:
a = batch_text_pair[idx]
else:
a = None
a = super().__call__(__UpperCAmelCase , __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
a = encoded_candidates.get('''input_ids''' )
a = encoded_candidates.get('''attention_mask''' )
a = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__UpperCAmelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__UpperCAmelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__UpperCAmelCase )
a = {key: item for key, item in output_data.items() if len(__UpperCAmelCase ) != 0}
return BatchEncoding(__UpperCAmelCase , tensor_type=__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict=None ) ->str:
"""simple docstring"""
a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
| 117
| 0
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCAmelCase__:
'''simple docstring'''
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : Optional[int]=13 , __snake_case : Optional[int]=7 , __snake_case : Union[str, Any]=True , __snake_case : str=True , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=99 , __snake_case : Tuple=32 , __snake_case : str=2 , __snake_case : str=4 , __snake_case : Union[str, Any]=37 , __snake_case : Dict="gelu" , __snake_case : str=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Any=512 , __snake_case : Any=16 , __snake_case : str=2 , __snake_case : List[str]=0.02 , __snake_case : Dict=3 , __snake_case : Tuple=4 , __snake_case : str=None , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Dict = 13
UpperCAmelCase_ : List[Any] = 7
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Union[str, Any] = 99
UpperCAmelCase_ : Dict = 32
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : int = 37
UpperCAmelCase_ : Optional[Any] = '''gelu'''
UpperCAmelCase_ : List[str] = 0.1
UpperCAmelCase_ : Dict = 0.1
UpperCAmelCase_ : int = 512
UpperCAmelCase_ : Tuple = 16
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Optional[Any] = 0.02
UpperCAmelCase_ : List[Any] = 3
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Any = None
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Any = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Dict = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[str] , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = TFRoFormerModel(config=__snake_case )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : Dict = [input_ids, input_mask]
UpperCAmelCase_ : Optional[Any] = model(__snake_case )
UpperCAmelCase_ : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Union[str, Any] = TFRoFormerForCausalLM(config=__snake_case )
UpperCAmelCase_ : List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCamelCase ( self : str , __snake_case : Tuple , __snake_case : Any , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = TFRoFormerForMaskedLM(config=__snake_case )
UpperCAmelCase_ : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , __snake_case : Tuple , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Any = TFRoFormerForSequenceClassification(config=__snake_case )
UpperCAmelCase_ : Optional[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : int = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : int , __snake_case : int , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.num_choices
UpperCAmelCase_ : Optional[Any] = TFRoFormerForMultipleChoice(config=__snake_case )
UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : int = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase_ : List[str] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : int , __snake_case : Any , __snake_case : int , __snake_case : str , __snake_case : List[str] , __snake_case : Dict , __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.num_labels
UpperCAmelCase_ : str = TFRoFormerForTokenClassification(config=__snake_case )
UpperCAmelCase_ : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : List[str] = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = TFRoFormerForQuestionAnswering(config=__snake_case )
UpperCAmelCase_ : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ : Dict = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : List[str] = False
def _lowerCamelCase ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : str ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = TFRoFormerModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__snake_case )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__snake_case )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCAmelCase_ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase_ : Dict = model(__snake_case )[0]
# TODO Replace vocab size
UpperCAmelCase_ : List[Any] = 50_000
UpperCAmelCase_ : Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __snake_case )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCAmelCase_ : Optional[Any] = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __snake_case , atol=1E-4 )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = 1E-4
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tf.constant([[4, 10]] )
UpperCAmelCase_ : List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCAmelCase_ : Any = emba(input_ids.shape )
UpperCAmelCase_ : Optional[int] = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(__snake_case , __snake_case , atol=self.tolerance )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
UpperCAmelCase_ : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCAmelCase_ : str = emba.weight[:3, :5]
tf.debugging.assert_near(__snake_case , __snake_case , atol=self.tolerance )
@require_tf
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = 1E-4
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
# 2,12,16,64
UpperCAmelCase_ : str = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase_ : Union[str, Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
UpperCAmelCase_ : Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCAmelCase_ : Optional[int] = embed_positions([2, 16, 768] )[None, None, :, :]
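        # Rotary embeddings rotate each (even, odd) feature pair of the query/key
        # by a position-dependent angle, so attention depends on relative position.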
UpperCAmelCase_ , UpperCAmelCase_ : Dict = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__snake_case , __snake_case , __snake_case )
UpperCAmelCase_ : Tuple = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
UpperCAmelCase_ : Any = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __snake_case , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __snake_case , atol=self.tolerance )
| 641
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
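        # HF packs the q/k/v biases into a single qkv bias; the original attention
        # has no k bias, so zeros are inserted in the middle slot.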
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641
| 1
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Evaluate the polynomial term by term: sum of c * x**i.
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's method: fold the coefficients from highest to lowest degree.
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_lowercase = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowercase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 659
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_( A__, A__, A__ ):
'''simple docstring'''
lowercase__ : List[Any] = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = 5_0_2_5_7 , lowerCamelCase__ = 1_0_2_4 , lowerCamelCase__ = 7_6_8 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = 1_2 , lowerCamelCase__ = None , lowerCamelCase__ = "gelu_new" , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1e-5 , lowerCamelCase__ = 0.0_2 , lowerCamelCase__ = True , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
super().__init__()
_lowerCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
_lowerCamelCase = prefix_inner_dim
_lowerCamelCase = prefix_hidden_dim
_lowerCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCamelCase = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
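        # encode_prefix/decode_prefix map the (CLIP) prefix features into a hidden
        # space and back out to GPT-2's embedding width; both are identity maps
        # when no prefix_hidden_dim is configured.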
_lowerCamelCase = GPTaConfig(
vocab_size=lowerCamelCase__ , n_positions=lowerCamelCase__ , n_embd=lowerCamelCase__ , n_layer=lowerCamelCase__ , n_head=lowerCamelCase__ , n_inner=lowerCamelCase__ , activation_function=lowerCamelCase__ , resid_pdrop=lowerCamelCase__ , embd_pdrop=lowerCamelCase__ , attn_pdrop=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , initializer_range=lowerCamelCase__ , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , scale_attn_by_inverse_layer_idx=lowerCamelCase__ , reorder_and_upcast_attn=lowerCamelCase__ , )
_lowerCamelCase = GPTaLMHeadModel(lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , ):
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
_lowerCamelCase = self.encode_prefix(lowerCamelCase__ )
_lowerCamelCase = self.decode_prefix(lowerCamelCase__ )
_lowerCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
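            # Prepend zero "dummy" tokens so the label tensor lines up with the
            # prefix embeddings concatenated in front of the text embeddings.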
_lowerCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
_lowerCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ , labels=lowerCamelCase__ , attention_mask=lowerCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
return torch.zeros(lowerCamelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ ):
return self.encode_prefix(lowerCamelCase__ )
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = torch.split(lowerCamelCase__ , 1 , dim=0 )
_lowerCamelCase = []
_lowerCamelCase = []
for feature in features:
_lowerCamelCase = self.decode_prefix(feature.to(lowerCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
_lowerCamelCase , _lowerCamelCase = self.generate_beam(
input_embeds=lowerCamelCase__ , device=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
_lowerCamelCase = torch.stack(lowerCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = 5 , lowerCamelCase__ = 6_7 , lowerCamelCase__ = 1.0 , lowerCamelCase__ = None , ):
_lowerCamelCase = eos_token_id
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = torch.ones(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.int )
_lowerCamelCase = torch.zeros(lowerCamelCase__ , device=lowerCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
_lowerCamelCase = input_embeds
else:
_lowerCamelCase = self.transformer.transformer.wte(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
_lowerCamelCase = self.transformer(inputs_embeds=lowerCamelCase__ )
_lowerCamelCase = outputs.logits
_lowerCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCamelCase = logits.softmax(-1 ).log()
if scores is None:
_lowerCamelCase , _lowerCamelCase = logits.topk(lowerCamelCase__ , -1 )
_lowerCamelCase = generated.expand(lowerCamelCase__ , *generated.shape[1:] )
_lowerCamelCase , _lowerCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
_lowerCamelCase = next_tokens
else:
_lowerCamelCase = tokens.expand(lowerCamelCase__ , *tokens.shape[1:] )
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
_lowerCamelCase = -float(np.inf )
_lowerCamelCase = 0
_lowerCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
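                # Length-normalize the cumulative log-probabilities so longer
                # beams are not penalized when selecting the top candidates.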
_lowerCamelCase = scores_sum / seq_lengths[:, None]
_lowerCamelCase , _lowerCamelCase = scores_sum_average.view(-1 ).topk(lowerCamelCase__ , -1 )
_lowerCamelCase = next_tokens // scores_sum.shape[1]
_lowerCamelCase = seq_lengths[next_tokens_source]
_lowerCamelCase = next_tokens % scores_sum.shape[1]
_lowerCamelCase = next_tokens.unsqueeze(1 )
_lowerCamelCase = tokens[next_tokens_source]
_lowerCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
_lowerCamelCase = generated[next_tokens_source]
_lowerCamelCase = scores_sum_average * seq_lengths
_lowerCamelCase = is_stopped[next_tokens_source]
_lowerCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
_lowerCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
_lowerCamelCase = is_stopped + next_tokens.eq(lowerCamelCase__ ).squeeze()
if is_stopped.all():
break
_lowerCamelCase = scores / seq_lengths
_lowerCamelCase = scores.argsort(descending=lowerCamelCase__ )
# tokens tensors are already padded to max_seq_length
_lowerCamelCase = [tokens[i] for i in order]
_lowerCamelCase = torch.stack(lowerCamelCase__ , dim=0 )
_lowerCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 661
| 0
|
from math import factorial
def solution(num: int = 100) -> int:
    """simple docstring"""
    # Sum the decimal digits of num!.
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 525
|
def get_data(source_data: list) -> list:
    """simple docstring"""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    """simple docstring"""
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    """simple docstring"""
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
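

# A minimal usage sketch (illustrative data, not from the source): each row is
# one candidate and each column one attribute; weight 1 rewards high raw values,
# weight 0 rewards low ones.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # columns: price (lower is better -> 0), mileage (lower -> 0), year (newer -> 1)
    print(procentual_proximity(vehicles, [0, 0, 1]))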
| 525
| 1
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCamelCase_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
__UpperCamelCase , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 )
if __name__ == "__main__":
import doctest
doctest.testmod()
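    # Worked example (added; the numbers are illustrative, not from the original):
    # 10_000 at a 0.05% daily rate over 10 days accrues 10_000 * 0.0005 * 10 = 50.0.
    print(simple_interest(10_000, 0.0005, 10))  # 50.0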
| 141
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : Union[str, Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : int = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : Optional[int] = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : Optional[Any] = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : Optional[Any] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Optional[Any] = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : Optional[Any] = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : int = 3
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
SCREAMING_SNAKE_CASE : Optional[Any] = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE : Any = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
SCREAMING_SNAKE_CASE : Tuple = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
SCREAMING_SNAKE_CASE : Optional[Any] = "wiki40b"
SCREAMING_SNAKE_CASE : Tuple = "dense"
SCREAMING_SNAKE_CASE : Union[str, Any] = "beam"
SCREAMING_SNAKE_CASE : Dict = 2
SCREAMING_SNAKE_CASE : str = 64
SCREAMING_SNAKE_CASE : Any = 256
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Dict = st.sidebar.checkbox("Generation options")
if generate_options:
SCREAMING_SNAKE_CASE : List[Any] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE : int = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
SCREAMING_SNAKE_CASE : List[str] = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE : List[Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE : Optional[Any] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Dict = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE : Any = None
# start main text
SCREAMING_SNAKE_CASE : Tuple = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
SCREAMING_SNAKE_CASE : Union[str, Any] = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE : str = st.text_input("Enter your question here:", "")
else:
SCREAMING_SNAKE_CASE : Tuple = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = make_support(question, source=wiki_source, method="sparse", n_results=10)
SCREAMING_SNAKE_CASE : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE : int = support_list[:10]
SCREAMING_SNAKE_CASE : List[Any] = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : str = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : str = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : int = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : Tuple = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE : List[str] = find_nearest_training(question)
SCREAMING_SNAKE_CASE : Tuple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
SCREAMING_SNAKE_CASE : Any = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 141
| 1
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
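    # Small extra check (added, not in the original file): merging keeps the
    # combined list sorted, e.g. merging (1, 3) and (2, 4) prints "1 -> 2 -> 3 -> 4".
    print(merge_lists(SSL((1, 3)), SSL((2, 4))))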
| 712
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from all printable characters."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
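    # Hedged sanity checks (added, not in the original script): a strong password
    # needs length >= 8 plus an uppercase letter, a lowercase letter, a digit
    # and a punctuation character.
    assert is_strong_password("Secret123!") is True
    assert is_strong_password("weakpass") is False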
| 249
| 0
|
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
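    # Minimal demo (added; the parameters are arbitrary): one update step on a
    # short highway with braking probability 0.3 and max speed 5.
    demo_highway = construct_highway(number_of_cells=10, frequency=3, initial_speed=1)
    print(simulate(demo_highway, number_of_update=1, probability=0.3, max_speed=5))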
| 391
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to the range [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit (sample) standard deviation
    return [round((x - mu) / sigma, ndigits) for x in data]
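
if __name__ == "__main__":
    # Worked example (added; the data is illustrative): for [2.0, 4.0, 6.0] the
    # normalized values are [0.0, 0.5, 1.0] and, since the sample standard
    # deviation is 2.0, the standardized values are [-1.0, 0.0, 1.0].
    print(normalization([2.0, 4.0, 6.0]))
    print(standardization([2.0, 4.0, 6.0]))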
| 597
| 0
|
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring shared by text1 and text2 (dynamic programming)."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
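    # Quick check (added, not in the original file): "abcdxyz" and "xyzabcd"
    # share the length-4 substring "abcd".
    assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"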
| 705
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 625
| 0
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
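    # Hedged note (added, not in the original script): the converted weights can
    # presumably be reloaded with diffusers afterwards, e.g.:
    #   UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")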
| 239
|
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value)

    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node):
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node) -> int:
        arr: list = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 239
| 1
|
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
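    # Hedged sanity checks (added, not in the original file):
    assert join("", ["a", "b", "c", "d"]) == "abcd"
    assert join("-", ["apple", "banana"]) == "apple-banana"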
| 592
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    """BARThez tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 592
| 1
|
'''simple docstring'''
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Compute simple accuracy between predictions and reference labels."""
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 186
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModel.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModel.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : int ) -> Optional[int]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
__A, __A= TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
__A, __A= AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
__A, __A= TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
__A, __A= AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__A= AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ , from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 186
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
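
# Hedged usage sketch (added; shapes and values are illustrative only):
#
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(num_inference_steps=10)
#   x = torch.randn(1, 3, 8, 8)
#   score = torch.randn(1, 3, 8, 8)  # stand-in for a score model's output
#   x, x_mean = scheduler.step_pred(score, x, scheduler.timesteps[0])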
| 188
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def A_ ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def A_ ( self : Any ) -> Tuple:
pass
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A_ ( self : int ) -> List[str]:
def check_hidden_states_output(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ):
lowerCamelCase__ : Any = model_class(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Dict ) -> int:
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase__ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ):
return model(pixel_values=UpperCAmelCase , **UpperCAmelCase )
with self.subTest('JIT Enabled' ):
lowerCamelCase__ : Dict = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCamelCase__ : Optional[int] = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A_ ( self : Any ) -> List[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def A_ ( self : Dict ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : List[Any] = prepare_img()
lowerCamelCase__ : List[str] = image_processor(images=UpperCAmelCase , return_tensors='np' )
lowerCamelCase__ : List[Any] = model(**UpperCAmelCase )
# verify the logits
lowerCamelCase__ : Union[str, Any] = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCamelCase__ : Tuple = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
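# Usage sketch mirroring the integration test above (not part of the test suite;
# assumes hub access and the flax/vision extras are installed):
#
#   from PIL import Image
#   import jax.numpy as jnp
#   from transformers import AutoImageProcessor, FlaxRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(images=image, return_tensors="np")
#   logits = model(**inputs).logits  # shape (1, 1000)
#   print(model.config.id2label[int(jnp.argmax(logits, axis=-1)[0])])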
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
def kth_permutation( k : int , n : int ) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1."""
    # factorials holds the place values of the factorial number system: 1!, 2!, ..., (n-1)!
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
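    # Example (a sketch): k is decoded in the factorial number system; for n = 4 the
    # place values are 3!, 2!, 1!, so k = 5 -> digits (0, 2, 1) -> [0, 3, 2, 1].
    assert kth_permutation(0 , 4 ) == [0, 1, 2, 3]
    assert kth_permutation(5 , 4 ) == [0, 3, 2, 1]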
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : List[Any]=1_8 , __lowerCAmelCase : Optional[Any]=3_0 , __lowerCAmelCase : str=4_0_0 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[Any]=True , ):
__snake_case = size if size is not None else {'height': 1_8, 'width': 1_8}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_normalize
def lowercase__ ( self : List[Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class a_ ( UpperCAmelCase__ , unittest.TestCase ):
lowercase_ : Union[str, Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowercase__ ( self : Dict ):
__snake_case = ImageGPTImageProcessingTester(self )
@property
def lowercase__ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Tuple ):
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , 'clusters' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'size' ) )
self.assertTrue(hasattr(__lowerCAmelCase , 'do_normalize' ) )
def lowercase__ ( self : Optional[int] ):
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def lowercase__ ( self : Optional[Any] ):
__snake_case = self.image_processing_class(**self.image_processor_dict )
__snake_case = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCAmelCase , obj[key] ) )
else:
self.assertEqual(obj[key] , __lowerCAmelCase )
def lowercase__ ( self : Any ):
__snake_case = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = os.path.join(__lowerCAmelCase , 'image_processor.json' )
image_processor_first.to_json_file(__lowerCAmelCase )
__snake_case = self.image_processing_class.from_json_file(__lowerCAmelCase ).to_dict()
__snake_case = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCAmelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __lowerCAmelCase )
def lowercase__ ( self : List[str] ):
__snake_case = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__lowerCAmelCase )
__snake_case = self.image_processing_class.from_pretrained(__lowerCAmelCase ).to_dict()
__snake_case = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCAmelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __lowerCAmelCase )
@unittest.skip('ImageGPT requires clusters at initialization' )
def lowercase__ ( self : int ):
pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
    image_a = Image.open(dataset[4]['file'] )
    image_b = Image.open(dataset[5]['file'] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
@slow
def lowercase__ ( self : List[Any] ):
__snake_case = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
__snake_case = prepare_images()
# test non-batched
__snake_case = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
__snake_case = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , __lowerCAmelCase )
# test batched
__snake_case = image_processing(__lowerCAmelCase , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
__snake_case = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , __lowerCAmelCase )
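# Usage sketch (not part of the tests; assumes hub access): the processor maps each
# pixel of the resized image to its nearest color cluster, so the model consumes
# `input_ids` of length 32 * 32 = 1024 instead of raw pixel values.
#
#   from PIL import Image
#   import requests
#   from transformers import ImageGPTImageProcessor
#
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   encoding = processor(images=image, return_tensors="pt")
#   print(encoding.input_ids.shape)  # torch.Size([1, 1024])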
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    require_version(deps[pkg] , hint )
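# Usage sketch (the specifier and hint below are illustrative, not taken from the
# dependency table above): `require_version` parses a pip-style requirement and
# raises ImportError carrying the hint when the installed version does not satisfy it.
#
#   require_version("numpy>=1.17", "try: pip install -U numpy")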
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
a__ = 8
def decimal_to_bits( x , bits=BITS ):
    """Expects an image tensor in [0, 1]; returns a bit-plane tensor in {-1, +1}."""
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b c h w -> b c 1 h w' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , 'b c d h w -> b (c d) h w' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits=BITS ):
    """Expects bit planes in [-1, 1]; returns an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )
    dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
    return (dec / 255).clamp(0.0 , 1.0 )
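# Round-trip sanity check (a sketch, not part of the original pipeline): quantize
# to 8 bits, expand to ±1 bit planes, and recover the quantized values exactly.
def _check_bit_roundtrip():
    x = torch.rand(1 , 3 , 4 , 4 )
    bit_planes = decimal_to_bits(x )          # (1, 3 * BITS, 4, 4), values in {-1, +1}
    recovered = bits_to_decimal(bit_planes )  # (1, 3, 4, 4), values in [0, 1]
    assert torch.equal((x * 255).int() , (recovered * 255).round().int() )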
def ddim_bit_scheduler_step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator=None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that additionally clips "predicted x_0" to the pipeline's bit scale."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str = "epsilon" , generator=None , return_dict : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that additionally clips "predicted x_0" to the pipeline's bit scale."""
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}." )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ) -> None:
        super().__init__()
        self.bit_scale = bit_scale
        # bind the bit-aware step function above to the scheduler instance so that
        # `self.scheduler.step(...)` receives the scheduler as `self`
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler ) if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step.__get__(scheduler )
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height : Optional[int] = 256 , width : Optional[int] = 256 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
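# Usage sketch ("my-org/bit-diffusion-unet" is a placeholder, not a real checkpoint):
#
#   unet = UNet2DConditionModel.from_pretrained("my-org/bit-diffusion-unet")
#   scheduler = DDIMScheduler(num_train_timesteps=1000, clip_sample=True)
#   pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]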
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ["PoolFormerFeatureExtractor"]
UpperCamelCase__ : Tuple = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
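# Note (a sketch of the deferred-import behaviour; names are illustrative):
#
#   import transformers.models.poolformer as poolformer  # cheap: submodules not imported yet
#   model_cls = poolformer.PoolFormerModel               # first attribute access triggers the real import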
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings, so every byte has a reversible, visible stand-in for BPE."""
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
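# Quick illustration (a sketch): BPE repeatedly merges the highest-ranked adjacent
# pair, and `bytes_to_unicode` guarantees every byte has a printable stand-in.
# >>> get_pairs(tuple("hello"))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
# >>> bytes_to_unicode()[ord(" ")]
# 'Ġ'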
class __snake_case ( lowerCAmelCase__ ):
__lowerCAmelCase : str = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[Any] = ['input_ids', 'attention_mask']
def __init__( self , _A , _A , _A="replace" , _A="<s>" , _A="</s>" , _A="</s>" , _A="<s>" , _A="<unk>" , _A="<pad>" , _A="<mask>" , _A=False , **_A , ):
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else bos_token
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else eos_token
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else sep_token
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else cls_token
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else unk_token
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
super().__init__(
errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
with open(_A , encoding='utf-8') as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_A)
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()}
with open(_A , encoding='utf-8') as merges_handle:
SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n')[1:-1]
SCREAMING_SNAKE_CASE_ = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A))))
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
def lowerCAmelCase__ ( self):
return len(self.encoder)
def lowerCAmelCase__ ( self):
return dict(self.encoder , **self.added_tokens_encoder)
def lowerCAmelCase__ ( self , _A):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ = tuple(_A)
SCREAMING_SNAKE_CASE_ = get_pairs(_A)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ = min(_A , key=lambda _A: self.bpe_ranks.get(_A , float('inf')))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
while i < len(_A):
try:
SCREAMING_SNAKE_CASE_ = word.index(_A , _A)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_ = j
if word[i] == first and i < len(_A) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_ = tuple(_A)
SCREAMING_SNAKE_CASE_ = new_word
if len(_A) == 1:
break
else:
SCREAMING_SNAKE_CASE_ = get_pairs(_A)
SCREAMING_SNAKE_CASE_ = ' '.join(_A)
SCREAMING_SNAKE_CASE_ = word
return word
def lowerCAmelCase__ ( self , _A):
SCREAMING_SNAKE_CASE_ = []
for token in re.findall(self.pat , _A):
SCREAMING_SNAKE_CASE_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A).split(' '))
return bpe_tokens
def lowerCAmelCase__ ( self , _A):
return self.encoder.get(_A , self.encoder.get(self.unk_token))
def lowerCAmelCase__ ( self , _A):
return self.decoder.get(_A)
def lowerCAmelCase__ ( self , _A):
SCREAMING_SNAKE_CASE_ = ''.join(_A)
SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def lowerCAmelCase__ ( self , _A , _A = None):
if not os.path.isdir(_A):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
SCREAMING_SNAKE_CASE_ = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(_A , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A) + '\n')
SCREAMING_SNAKE_CASE_ = 0
with open(_A , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A: kv[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!')
SCREAMING_SNAKE_CASE_ = token_index
writer.write(' '.join(_A) + '\n')
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self , _A , _A = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self , _A , _A = None , _A = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
if token_ids_a is None:
return [1] + ([0] * len(_A)) + [1]
return [1] + ([0] * len(_A)) + [1, 1] + ([0] * len(_A)) + [1]
def lowerCAmelCase__ ( self , _A , _A = None):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCAmelCase__ ( self , _A , _A=False , **_A):
SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(_A) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ = ' ' + text
return (text, kwargs)
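# Usage sketch (assumes hub access; the exact ids are illustrative of the scheme):
#
#   from transformers import BartTokenizer
#   tok = BartTokenizer.from_pretrained("facebook/bart-base")
#   tok("Hello world")["input_ids"]   # [0, 31414, 232, 2] -> <s> Hello Ġworld </s>
#   tok("A", "B")["input_ids"]        # a pair is wrapped as <s> A </s></s> B </s>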
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase =logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
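# Quick check (a sketch using a tiny stand-in config):
#
#   from types import SimpleNamespace
#   keys = create_rename_keys(SimpleNamespace(num_hidden_layers=1))
#   assert ("transformer.blocks.0.norm1.weight",
#           "vilt.encoder.layer.0.layernorm_before.weight") in keys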
def read_in_q_k_v( state_dict , config ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """Copy/paste/tweak the original ViLT checkpoint weights into the HuggingFace ViLT structure."""
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError("Unknown model type" )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        image_b = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=True ).raw )
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_a = processor(image_a , text , return_tensors="pt" )
        encoding_b = processor(image_b , text , return_tensors="pt" )
        outputs = model(
            input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_2=encoding_b.pixel_values , )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=True ).raw )
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image , text , return_tensors="pt" )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30_522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :2] , expected_slice , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model and processor to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase =parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
def odd_even_sort( input_list : list ) -> list:
    """Sort a list in place with odd-even transposition (brick) sort and return it."""
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted" )
    input_list = [int(x ) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list )
    print("The sorted list is" )
    print(sorted_list )
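# Worked example (a sketch): each pass first compares even-indexed neighbour pairs,
# then odd-indexed ones, so [3, 1, 2] -> [1, 3, 2] (even pass) -> [1, 2, 3] (odd pass).
# >>> odd_even_sort([3, 1, 2])
# [1, 2, 3]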
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase_ :
__lowerCamelCase : Dict = OPTConfig
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : Optional[Any] = "gelu"
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=20 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=16 , _lowerCAmelCase=16 , ) -> Optional[int]:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = embed_dim
_lowerCAmelCase = word_embed_proj_dim
_lowerCAmelCase = False
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_lowerCAmelCase , **self.config_updates , )
_lowerCAmelCase = prepare_opt_inputs_dict(_lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = TFOPTModel(config=_lowerCAmelCase )
_lowerCAmelCase = inputs_dict["input_ids"]
_lowerCAmelCase = input_ids[:1, :]
_lowerCAmelCase = inputs_dict["attention_mask"][:1, :]
_lowerCAmelCase = 1
# first forward pass
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-3 )
@require_tf
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__lowerCamelCase : Tuple = (TFOPTForCausalLM,) if is_tf_available() else ()
__lowerCamelCase : int = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase : Tuple = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : int = False
__lowerCamelCase : Tuple = 10
def _snake_case ( self ) -> Any:
_lowerCAmelCase = TFOPTModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """Build an int32 tf.Tensor from a nested list of token ids."""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data(self) -> Optional[int]:
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    @slow
    def test_inference_no_head(self) -> Tuple:
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp(self) -> Union[str, Any]:
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self) -> Optional[int]:
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    @property
    def prompts(self) -> Union[str, Any]:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self) -> Dict:
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self) -> Optional[Any]:
        model_id = "facebook/opt-350m"
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self) -> List[str]:
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
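# Note on `test_batch_generation` above: decoder-only models such as OPT must be
# left-padded for batched generation (`tokenizer.padding_side = "left"`), otherwise
# the model conditions on pad tokens sitting between the prompt and the new tokens.
# Minimal sketch of the same pattern (names follow this file; not re-verified here):
#     tokenizer.padding_side = "left"
#     batch = tokenizer(sentences, return_tensors="tf", padding=True)
#     out = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])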
| 489
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs=None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] ) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model )
        madeup_words = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ) -> List[str]:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ) -> Optional[int]:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 ))
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0 ) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ) -> Any:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
return (out_vocab_file,)
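# Usage sketch for the tokenizer above (upstream this class is XGLMTokenizer; the
# "facebook/xglm-564M" checkpoint comes from PRETRAINED_VOCAB_FILES_MAP above):
#     tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tok("Hello world").input_ids   # begins with sep_token_id, see build_inputs_with_special_tokens
#     tok.convert_ids_to_tokens(ids)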
| 489
| 1
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase__ ( matrix: list[list[float]] ):
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
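# Example for the function above: inverting [[2.0, 5.0], [2.0, 0.0]] (determinant -10)
# yields [[0.0, 0.5], [0.2, -0.2]], and multiplying the two recovers the identity matrix.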
| 49
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __SCREAMING_SNAKE_CASE ( CLIPTokenizer ):
    def __init__( self , *args , **kwargs ) ->Union[str, Any]:
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ) ->Dict:
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ) ->List[str]:
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}; keep placeholder tokens independent""" )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ) ->int:
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ) ->Optional[int]:
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ) ->List[Any]:
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
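# Usage sketch (upstream this class is MultiTokenCLIPTokenizer, a textual-inversion
# helper; the checkpoint name below is illustrative):
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     ids = tokenizer("A photo of <cat-toy>").input_ids  # the placeholder expands to 4 learned tokens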
| 448
| 0
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_from_dir(tmp_path_factory , files) -> List[Any]:
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""")
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""") as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""")
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""") as f:
f.write("""""")
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""") as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""")}) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path , dataset_info: DatasetInfo) -> Any:
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , """dataset_info.json"""))
def test_dataset_info_to_yaml_dict() -> None:
    dataset_info = DatasetInfo(
        description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""")}) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty() -> None:
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()}),
DatasetInfosDict({"""my_config_name""": DatasetInfo()}),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""")}) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
}),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42),
"""v2""": DatasetInfo(dataset_size=1_337),
}),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path , dataset_infos_dict: DatasetInfosDict) -> List[str]:
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , """README.md"""))
| 83
|
def nor_gate(input_1: int , input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main() -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83
| 1
|
def greatest_common_divisor(a: int , b: int) -> int:
    '''simple docstring'''
    return abs(b) if a == 0 else greatest_common_divisor(b % a , a)
def gcd_by_iterative(x: int , y: int) -> int:
    '''simple docstring'''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y , x % y
    return abs(x)
def main() -> None:
    '''simple docstring'''
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            F"""greatest_common_divisor({num_1}, {num_2}) = """
            F"""{greatest_common_divisor(num_1 , num_2)}""")
        print(F"""By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2)}""")
except (IndexError, UnboundLocalError, ValueError):
print("Wrong input" )
if __name__ == "__main__":
main()
| 475
|
def greatest_common_divisor(a: int , b: int) ->int:
    """simple docstring"""
    return abs(b) if a == 0 else greatest_common_divisor(b % a , a)
def gcd_by_iterative(x: int , y: int) ->int:
    """simple docstring"""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y , x % y
    return abs(x)
def main() ->None:
    """simple docstring"""
    try:
        nums = input('''Enter two integers separated by comma (,): ''').split(''',''')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            F'''greatest_common_divisor({num_1}, {num_2}) = '''
            F'''{greatest_common_divisor(num_1 , num_2)}''')
        print(F'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1 , num_2)}''')
except (IndexError, UnboundLocalError, ValueError):
print('''Wrong input''' )
if __name__ == "__main__":
main()
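# Example values for the two implementations above: greatest_common_divisor(121, 11)
# and gcd_by_iterative(121, 11) both return 11, and both normalise signs,
# e.g. gcd_by_iterative(-4, 6) == 2.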
| 154
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path: str) -> Dict:
    sd = torch.load(checkpoint_path , map_location='cpu')
    return sd
def get_new_dict(d: Dict , config , rename_keys_prefix=rename_keys_prefix) -> Any:
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path: Any , pytorch_dump_folder_path: Optional[int]) -> Dict:
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''')
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict , config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
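# Example invocation (the checkpoint file name must be one of ACCEPTABLE_CHECKPOINTS;
# the script file name here is illustrative):
#     python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa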
| 557
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE_ ( BaseOutput ):
    """simple docstring"""
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 557
| 1
|
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 70
|
'''simple docstring'''
def and_gate(input_1: int , input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate() -> None:
    '''simple docstring'''
assert and_gate(0, 0 ) == 0
assert and_gate(0, 1 ) == 0
assert and_gate(1, 0 ) == 0
assert and_gate(1, 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 640
| 0
|
"""simple docstring"""
def method_a(boundary , steps) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a , b , h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps)
    print(f"""y = {y}""" )
if __name__ == "__main__":
main()
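# For f(x) = x**2 on [0, 1] the exact integral is 1/3; with steps = 10.0 the script
# above prints y ≈ 0.335, the expected slight overestimate of the trapezoidal rule
# for a convex integrand.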
| 705
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _lowerCamelCase ( nn.Module ):
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ) -> None:
        """simple docstring"""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ):
        """simple docstring"""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
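# Design note for the module above: each branch contributes its residual
# (encoded_state - input), the two residuals are blended with `mix_ratio`
# (0.5 by default, i.e. equal weight for the 77-token and 257-token conditions),
# and the input is added back at the end. Pipelines may overwrite these fields.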
| 507
| 0
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
    set_seed(3 )
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    # load pretrained model
    model = load_gpta("""gpt2""" ).to(device )
    print("""computing perplexity on objective set""" )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print("""perplexity on objective set:""" , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print("""Test perplexity, step""" , global_step , """:""" , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print("""Test perplexity, step""" , global_step , """:""" , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
    # Required parameters
    parser.add_argument(
        """--data_dir""" , default=None , type=str , required=True , help="""The input data dir. Should contain data files for WikiText.""" , )
    parser.add_argument(
        """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
    parser.add_argument(
        """--data_file""" , type=str , default=None , help=(
            """A jbl file containing tokenized data which can be split as objective dataset, """
            """train_dataset and test_dataset."""
        ) , )
    parser.add_argument(
        """--igf_data_file""" , type=str , default=None , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
    parser.add_argument(
        """--output_dir""" , default=None , type=str , required=True , help="""The output directory where the final fine-tuned model is stored.""" , )
    parser.add_argument(
        """--tokenizer_name""" , default=None , type=str , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
    parser.add_argument("""--seed""" , type=int , default=None , help="""A seed for reproducible training.""" )
    parser.add_argument(
        """--context_len""" , default=32 , type=int , help=(
            """The maximum total input sequence length after tokenization. Sequences longer """
            """than this will be truncated, sequences shorter will be padded."""
        ) , )
    parser.add_argument(
        """--size_objective_set""" , default=100 , type=int , help="""number of articles that are long enough to be used as our objective set""" , )
    parser.add_argument(
        """--eval_freq""" , default=100 , type=int , help="""secondary model evaluation is triggered at eval_freq""" )
    parser.add_argument("""--max_steps""" , default=1000 , type=int , help="""To calculate training epochs""" )
    parser.add_argument(
        """--secondary_learner_batch_size""" , default=128 , type=int , help="""batch size of training data for secondary learner""" , )
    parser.add_argument(
        """--batch_size""" , default=16 , type=int , help="""batch size of training data of language model(gpt2) """ )
    parser.add_argument(
        """--eval_interval""" , default=10 , type=int , help=(
            """decay the selectivity of our secondary learner filter from"""
            """1 standard deviation above average to 1 below average after 10 batches"""
        ) , )
    parser.add_argument(
        """--number""" , default=100 , type=int , help="""The number of examples split to be used as objective_set/test_data""" )
    parser.add_argument(
        """--min_len""" , default=1026 , type=int , help="""The minimum length of the article to be used as objective set""" )
    parser.add_argument(
        """--secondary_learner_max_epochs""" , default=15 , type=int , help="""number of epochs to train secondary learner""" )
    parser.add_argument("""--trim""" , default=True , type=bool , help="""truncate the example if it exceeds context length""" )
    parser.add_argument(
        """--threshold""" , default=1.0 , type=float , help=(
            """The threshold value used by secondary learner to filter the train_data and allow only"""
            """ informative data as input to the model"""
        ) , )
    parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=str , help="""finetuned_model_name""" )
    parser.add_argument(
        """--recopy_model""" , default=recopy_gpta , type=str , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("""data/IGF_values.jbl""" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 50
|
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float , from_type: str , to_type: str) -> float:
    from_sanitized = from_type.lower().strip("""s""" )
    to_sanitized = to_type.lower().strip("""s""" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
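    # Spot checks for the converter above: 4 m == 0.004 km and 1 km == 1000 m.
    assert length_conversion(4, "meter", "kilometer") == 0.004
    assert length_conversion(1, "kilometer", "m") == 1000.0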
| 548
| 0
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
@require_torch
    def test_small_model_pt( self )-> Union[str, Any]:
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
    def test_small_model_tf( self )-> List[Any]:
'''simple docstring'''
pass
@slow
@require_torch
    def test_large_model_pt( self )-> Tuple:
        audio_classifier = pipeline(
            task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50' )
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5 , )
@unittest.skip('No models are available in TF' )
    def test_large_model_tf( self )-> Tuple:
'''simple docstring'''
pass
| 718
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None )-> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer: GPTaTokenizer , *args , **kwargs )-> Dict:
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs )-> Union[str, Any]:
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config )-> List[Any]:
        return cls(**config )
    def get_config( self )-> Dict:
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length = None )-> Union[str, Any]:
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 432
| 0
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
    def test_cached_files_are_used_when_internet_is_down( self ) -> Tuple:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files( self ) -> str:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file( self ) -> str:
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f )
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_0_0_0 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )
    def test_legacy_load_from_url( self ) -> Optional[Any]:
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass( cls ) -> Optional[int]:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ) -> Tuple:
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt" )
            with open(vocab_file, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer" )
# Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def A_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt" )
            with open(vocab_file, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def A_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt" )
            with open(vocab_file, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt" )
            with open(vocab_file, "w", encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
        tokenizer = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''', use_fast=False, trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
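        # Added note: `register_for_auto_class` records an `auto_map` entry in the saved
        # tokenizer config, which is what lets `AutoTokenizer.from_pretrained(...,
        # trust_remote_code=True)` above resolve the custom classes from the Hub repo.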
class TrieTest (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
        trie = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] )
def A_ ( self : Dict ) -> Dict:
"""simple docstring"""
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ), ["A", "BC"] )
self.assertEqual(trie.split("BCA" ), ["BC", "A"] )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def A_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def A_ ( self : int ) -> Dict:
"""simple docstring"""
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ), ["AB", "C"] )
def A_ ( self : List[str] ) -> Dict:
"""simple docstring"""
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] )
def A_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts, ["AB", "C"] )
| 663
|
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seqaseq = False
    def setUp( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file, "w", encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self, **kwargs ) -> str:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_rust_tokenizer( self, **kwargs ) -> int:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "lower newer"
SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer"
return input_text, output_text
def A_ ( self : int ) -> Dict:
"""simple docstring"""
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens, rust_tokens )
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False )
        self.assertListEqual(ids, rust_ids )
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True )
        ids = tokenizer.encode(sequence, add_prefix_space=True )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids, rust_ids )
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
    def A_ ( self : Tuple, *args : List[Any], **kwargs : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
    def A_ ( self : Optional[Any], max_length : int=1_5 ) -> List[str]:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length" )
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length" )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
def A_ ( self : Tuple ) -> int:
"""simple docstring"""
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" )
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=3_0, return_tensors="np" )
        out_sa = tokenizer(s2, padding=True, truncate=True, return_tensors="np" )
        out_p = tokenizer(*p, padding="max_length", max_length=6_0, return_tensors="np" )
        out_pa = tokenizer(p2, padding=True, truncate=True, return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1], 3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1], 6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$"
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0], _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def A_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> str:
"""simple docstring"""
# TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True )]
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False )
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False )
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True, )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask ), len(encoded_sequence_w_special ) )
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special )
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence )
@require_tokenizers
class OPTTokenizationTest (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("test_opt" )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_UpperCAmelCase, )
# Same as above
self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def A_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = "bos"
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"]
SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(
_UpperCAmelCase, )
# We changed the bos token
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("./tok" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_UpperCAmelCase, )
self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 663
| 1
|
'''simple docstring'''
def greatest_common_divisor( x , y ):
    """simple docstring"""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x , y ):
    """simple docstring"""
    return (x * y) // greatest_common_divisor(x , y )
def solution( n = 20 ):
    """simple docstring"""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
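# Sanity check (added note): solution(10) == 2520, the smallest number evenly
# divisible by every integer from 1 to 10.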
if __name__ == "__main__":
print(F"""{solution() = }""")
| 721
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( n ):
    """simple docstring"""
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution( ):
    """simple docstring"""
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
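# Sanity check (added note): 145 is one such curious number, since 1! + 4! + 5! = 145,
# so sum_of_digit_factorial(145) == 145.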
if __name__ == "__main__":
print(F"""{solution() = }""")
| 160
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
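        # Added note: with the defaults above, image_seq_length = (4 // 2) ** 2 + 1 = 5,
        # so seq_length = 7 text tokens + 5 image positions = 12.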
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'''pixel_values''': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
    def setUp( self ):
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def A_ ( self : Tuple ):
self.config_tester.run_common_tests()
    def A_ ( self : Tuple ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , '''hf_compute_loss''' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['''labels'''] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: '''input_ids'''}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def A_ ( self : List[Any] ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def A_ ( self : Optional[int] ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
    def A_ ( self : Any ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
    def A_ ( self : Dict ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
    def A_ ( self : int ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
def A_ ( self : List[Any] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Optional[Any]:
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def A_ ( self : Optional[int] ):
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''tf''' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 640
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_debug_launcher_script( self ) -> int:
'''simple docstring'''
debug_launcher(test_script.main )
    def test_debug_launcher_ops( self ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(test_ops.main )
| 534
| 0
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class JaxFormatter ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> Optional[int]:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '
                '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''' )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'Device with string identifier {self.device} not listed among the available '
                f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
                f'device: {str(jax.devices()[0] )}.' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        import jax
        import jax.numpy as jnp
        # stack equally shaped / typed arrays into one batched jax array
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct : dict ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table : pa.Table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table : pa.Table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table : pa.Table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
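# Usage sketch (assumed wiring; in `datasets` this formatter is what backs
# `dataset.with_format("jax")`):
#   ds = load_dataset("glue", "mrpc", split="train").with_format("jax")
#   batch = ds[:8]   # format_batch returns a dict of jax.Array values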
| 666
|
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval( s : str ):
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution( n : str = N ):
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
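# For reference (added note): this is Project Euler 8, the greatest product of 13
# adjacent digits in the 1000-digit number above; the known answer is 23514624000.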
if __name__ == "__main__":
print(F"""{solution() = }""")
| 666
| 1
|
'''simple docstring'''
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution, not between two distinct points.
It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
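# In symbols, for a point x and a reference distribution with mean mu and covariance S:
# D^2(x) = (x - mu)^T S^{-1} (x - mu). The `_compute` method below evaluates this per
# row of `X`, falling back to the pseudo-inverse when S is singular.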
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case( datasets.Metric ):
    def _info(self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
    def _compute(self , X , reference_distribution ) -> List[Any]:
        """simple docstring"""
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('Expected `X` to be a 2D vector' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 531
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_fold_dataloaders( accelerator ,dataset ,train_idxs ,valid_idxs ,batch_size = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs ),
            'validation': dataset['train'].select(valid_idxs ),
            'test': dataset['validation'],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=True ,max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function ,batched=True ,remove_columns=['idx', 'sentence1', 'sentence2'] ,)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' ,'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples ,padding='longest' ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors='pt' ,)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets['test'] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config ,args ):
    '''simple docstring'''
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue' ,'mrpc' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' ,'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows ) ,datasets['train']['label'] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator ,datasets ,train_idxs ,valid_idxs ,)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' ,return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() ,lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=100 ,num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps ,)
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions ,references=references ,)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""" ,eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        test_predictions.append(torch.cat(fold_predictions ,dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references ,dim=0 )
    preds = torch.stack(test_predictions ,dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds ,references=test_references )
    # Use accelerator.print to print only on the main process.
    accelerator.print('Average test metrics from all folds:' ,test_metric )
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' ,type=str ,default=None ,choices=['no', 'fp16', 'bf16', 'fp8'] ,help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' ,)
    parser.add_argument('--cpu' ,action='store_true' ,help='If passed, will train on the CPU.' )
    # New Code #
    parser.add_argument('--num_folds' ,type=int ,default=3 ,help='The number of splits to perform across the dataset' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config ,args )
if __name__ == "__main__":
main()
| 531
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
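
# Hedged illustration (not part of the original script): the qkv handling above relies on
# torch.Tensor.chunk and squeeze. A fused qkv conv weight of shape (3*C, C, 1, 1) splits into
# three (C, C, 1, 1) tensors, and squeezing the two trailing 1x1 dims yields (C, C) linear weights.
#   qkv = torch.randn(3 * 8, 8, 1, 1)
#   q, k, v = qkv.chunk(3, dim=0)
#   q.squeeze(-1).squeeze(-1).shape  # torch.Size([8, 8])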
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 154
| 1
|
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files into one."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
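
# Hedged worked example (not from the original file): merging [2, 3, 4] first combines
# 2 and 3 (cost 5), then 5 and 4 (cost 9), for a minimum total cost of 14.
#   optimal_merge_pattern([2, 3, 4])  # 14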
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11
|
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one have a round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
| 517
| 0
|
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
__lowerCAmelCase : Dict =["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 197
|
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 197
| 1
|
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
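
# Hedged examples (not from the original file):
#   split("apple#banana#cherry#orange", separator="#")  # ['apple', 'banana', 'cherry', 'orange']
#   split("Hello there")                                # ['Hello', 'there']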
if __name__ == "__main__":
from doctest import testmod
testmod()
| 614
|
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
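
# Hedged usage sketch (not part of this module): launching a training function from a
# notebook. `train_loop` is a hypothetical user-defined function.
#   def train_loop(mixed_precision="fp16"):
#       ...
#   notebook_launcher(train_loop, args=("fp16",), num_processes=2)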
| 614
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 718
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 581
| 0
|
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of the array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
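
# Hedged worked example (not from the original file): with a sliding window of k=4,
#   max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)  # 24 (window [3, 1, 0, 20])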
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
_UpperCAmelCase = [randint(-1000, 1000) for i in range(100)]
_UpperCAmelCase = randint(0, 110)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 558
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
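
# Hedged usage sketch (not part of this module): resolving the default backend and
# instantiating it through the registry above.
#   backend_name = default_hp_search_backend()
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()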
| 558
| 1
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
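
# Hedged worked example (not from the original file): the smallest non-negative x with
# x = 1 (mod 5) and x = 3 (mod 7) is 31, and both implementations agree:
#   chinese_remainder_theorem(5, 1, 7, 3)   # 31
#   chinese_remainder_theorem2(5, 1, 7, 3)  # 31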
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
| 719
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
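
# Hedged worked example (not from the original file): two of the three normalized votes
# fall within 0.1 of the actual value, so the majority deems the data safe.
#   data_safety_checker([0.10, 0.15, 0.12], 0.13)  # True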
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_lowerCamelCase : List[str] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
_lowerCamelCase : Any = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
_lowerCamelCase : Optional[Any] = Normalizer().fit_transform(data_input_df.values)
# split data
_lowerCamelCase : List[str] = normalize_df[:, 2].tolist()
_lowerCamelCase : Dict = normalize_df[:, 0].tolist()
_lowerCamelCase : Optional[Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_lowerCamelCase : Union[str, Any] = normalize_df[:, [1, 2]].tolist()
_lowerCamelCase : Any = x[: len(x) - 1]
_lowerCamelCase : Optional[int] = x[len(x) - 1 :]
# for linear regression & sarimax
_lowerCamelCase : List[str] = total_date[: len(total_date) - 1]
_lowerCamelCase : Any = total_user[: len(total_user) - 1]
_lowerCamelCase : Dict = total_match[: len(total_match) - 1]
_lowerCamelCase : Any = total_date[len(total_date) - 1 :]
_lowerCamelCase : List[str] = total_user[len(total_user) - 1 :]
_lowerCamelCase : Any = total_match[len(total_match) - 1 :]
# voting system with forecasting
_lowerCamelCase : List[str] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_lowerCamelCase : int = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
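# The closed-form step in linear_regression_prediction above is the normal
# equation beta = (X^T X)^{-1} X^T y; a small standalone sketch using the
# numerically safer pseudo-inverse (sample numbers are purely illustrative):
import numpy as np

_X = np.array([[1.0, 0.1, 0.2], [1.0, 0.2, 0.1], [1.0, 0.3, 0.4]])
_y = np.array([0.5, 0.6, 0.7])
_beta = np.linalg.pinv(_X) @ _y  # identical to the inverse form when X^T X is invertible
print(_beta)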
| 407
| 0
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( _A , _A , unittest.TestCase):
"""simple docstring"""
a__ : List[str] = IFPipeline
a__ : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
a__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case_ ( self : Dict ) -> Optional[Any]:
return self._get_dummy_components()
def snake_case_ ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any]=0 ) -> List[str]:
if str(__lowerCAmelCase ).startswith('''mps''' ):
_A = torch.manual_seed(__lowerCAmelCase )
else:
_A = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_A = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def snake_case_ ( self : str ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def snake_case_ ( self : Union[str, Any] ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case_ ( self : Union[str, Any] ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]:
self._test_save_load_local()
def snake_case_ ( self : Dict ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case_ ( self : Dict ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
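# The mps branch in the dummy-input helper above exists because per-device
# torch.Generator objects have historically not been supported on the mps
# backend; a standalone sketch of that seeding pattern:
import torch

def _seeded_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # falls back to the global CPU RNG
    return torch.Generator(device=device).manual_seed(seed)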
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self : Tuple ) -> str:
# if
_A = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
_A = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
_A , _A = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A = None
_A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_A = IFImgaImgPipeline(**pipe_a.components )
_A = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A = IFInpaintingPipeline(**pipe_a.components )
_A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] ) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , num_inference_steps=2 , generator=__lowerCAmelCase , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = pipe_a(
prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , num_inference_steps=2 , generator=__lowerCAmelCase , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = pipe_a(
prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , original_image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__lowerCAmelCase )
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , num_inference_steps=2 , generator=__lowerCAmelCase , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__lowerCAmelCase )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__lowerCAmelCase )
_A = pipe_a(
prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , original_image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
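# A sketch of how the reset/read pair above is typically wrapped around a
# workload (helper name is illustrative; requires a CUDA device):
import torch

def _run_and_measure_peak_vram(fn, *args, **kwargs):
    # Reset allocator statistics so max_memory_allocated() reflects only `fn`.
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    result = fn(*args, **kwargs)
    return result, torch.cuda.max_memory_allocated()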
| 2
|
def A__ ( a : int , b : int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '''1''' and char_b == '''1''' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
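# The same result can be had from Python's built-in bitwise AND; the only
# difference is that the string version above zero-pads to the longer operand:
def _binary_and_native(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    return bin(a & b)

assert _binary_and_native(25, 32) == "0b0"  # the padded variant gives "0b000000"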
| 32
| 0
|
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
UpperCAmelCase__ : Optional[int] = deepcopy(_UpperCAmelCase )
elif os.path.exists(_UpperCAmelCase ):
with io.open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase__ : Union[str, Any] = json.load(_UpperCAmelCase )
else:
try:
UpperCAmelCase__ : List[Any] = baseaa.urlsafe_baadecode(_UpperCAmelCase ).decode('''utf-8''' )
UpperCAmelCase__ : Optional[int] = json.loads(_UpperCAmelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
UpperCAmelCase__ : Optional[Any] = config
self.set_stage_and_offload()
def lowerCamelCase ( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
UpperCAmelCase__ : int = self.get_value('''zero_optimization.stage''' , -1 )
# offload
UpperCAmelCase__ : List[Any] = False
if self.is_zeroa() or self.is_zeroa():
UpperCAmelCase__ : Optional[Any] = set(['''cpu''', '''nvme'''] )
UpperCAmelCase__ : str = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
UpperCAmelCase__ : Tuple = True
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Any = self.config
# find the config node of interest if it exists
UpperCAmelCase__ : str = ds_key_long.split('''.''' )
UpperCAmelCase__ : Any = nodes.pop()
for node in nodes:
UpperCAmelCase__ : List[Any] = config.get(_UpperCAmelCase )
if config is None:
return None, ds_key
return config, ds_key
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.find_config_node(_UpperCAmelCase )
if config is None:
return default
return config.get(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
UpperCAmelCase__ : List[str] = self.config
# find the config node of interest if it exists
UpperCAmelCase__ : Union[str, Any] = ds_key_long.split('''.''' )
for node in nodes:
UpperCAmelCase__ : List[Any] = config
UpperCAmelCase__ : str = config.get(_UpperCAmelCase )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(_UpperCAmelCase )
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = self.get_value(_UpperCAmelCase )
return False if value is None else bool(_UpperCAmelCase )
def lowerCamelCase ( self , _UpperCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = self.get_value(_UpperCAmelCase )
return False if value is None else not bool(_UpperCAmelCase )
def lowerCamelCase ( self ):
return self._stage == 2
def lowerCamelCase ( self ):
return self._stage == 3
def lowerCamelCase ( self ):
return self._offload
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
UpperCAmelCase__ : int = engine
def lowerCamelCase ( self , _UpperCAmelCase , **_UpperCAmelCase ):
# runs backpropagation and handles mixed precision
self.engine.backward(_UpperCAmelCase , **_UpperCAmelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , device_placement=_UpperCAmelCase , scaler=_UpperCAmelCase )
UpperCAmelCase__ : Union[str, Any] = hasattr(self.optimizer , '''overflow''' )
def lowerCamelCase ( self , _UpperCAmelCase=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowerCamelCase ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowerCamelCase ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=0.0_01 , _UpperCAmelCase=0 , **_UpperCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = params
UpperCAmelCase__ : str = lr
UpperCAmelCase__ : Union[str, Any] = weight_decay
UpperCAmelCase__ : Any = kwargs
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=0 , **_UpperCAmelCase ):
UpperCAmelCase__ : Dict = optimizer
UpperCAmelCase__ : str = total_num_steps
UpperCAmelCase__ : Dict = warmup_num_steps
UpperCAmelCase__ : Any = kwargs
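# A standalone restatement of the dotted-key lookup the DeepSpeed config
# wrapper above performs (dictionary contents here are illustrative):
def _get_nested_value(config: dict, ds_key_long: str, default=None):
    # Walk a nested dict with a dotted key, e.g. "zero_optimization.stage".
    *nodes, leaf = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)

_cfg = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert _get_nested_value(_cfg, "zero_optimization.stage") == 3
assert _get_nested_value(_cfg, "zero_optimization.offload_param.device") == "cpu"
assert _get_nested_value(_cfg, "optimizer.type", default="adam") == "adam"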
| 599
|
'''simple docstring'''
def lowerCAmelCase__ ( discount_rate : float , cash_flows : list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError('''Discount rate cannot be negative''' )
    if not cash_flows:
        raise ValueError('''Cash flows list cannot be empty''' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
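# Worked example for the NPV helper above: a 1000 outflow now followed by two
# 600 inflows at a 10% discount rate gives -1000 + 600/1.1 + 600/1.21 = 41.32.
assert lowerCAmelCase__(0.10, [-1000.0, 600.0, 600.0]) == 41.32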
| 599
| 1
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__ :
"""simple docstring"""
a__ : str
a__ : str = None
@staticmethod
def snake_case_ ( ) -> List[str]:
raise NotImplementedError
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : str ) -> int:
raise NotImplementedError
def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : int ) -> List[Any]:
raise NotImplementedError
def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def snake_case_ ( cls : Optional[Any] ) -> List[str]:
return f'''`pip install {cls.pip_package or cls.name}`'''
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : List[str] = "optuna"
@staticmethod
def snake_case_ ( ) -> int:
return is_optuna_available()
def snake_case_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : int ) -> Any:
return run_hp_search_optuna(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : Tuple , __lowerCAmelCase : Tuple ) -> Dict:
return default_hp_space_optuna(__lowerCAmelCase )
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : List[str] = "ray"
a__ : Tuple = "'ray[tune]'"
@staticmethod
def snake_case_ ( ) -> List[str]:
return is_ray_available()
def snake_case_ ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : Optional[int] ) -> Optional[int]:
return run_hp_search_ray(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple ) -> int:
return default_hp_space_ray(__lowerCAmelCase )
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Optional[int] = "sigopt"
@staticmethod
def snake_case_ ( ) -> Optional[Any]:
return is_sigopt_available()
def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : int ) -> List[str]:
return run_hp_search_sigopt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : str , __lowerCAmelCase : Dict ) -> int:
return default_hp_space_sigopt(__lowerCAmelCase )
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Union[str, Any] = "wandb"
@staticmethod
def snake_case_ ( ) -> int:
return is_wandb_available()
def snake_case_ ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : List[str] ) -> Dict:
return run_hp_search_wandb(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : List[Any] , __lowerCAmelCase : Tuple ) -> Dict:
return default_hp_space_wandb(__lowerCAmelCase )
UpperCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def SCREAMING_SNAKE_CASE_ ( ) -> str:
_A = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_snake_case ) > 0:
_A = available_backends[0].name
if len(_snake_case ) > 1:
logger.info(
F'''{len(_snake_case )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
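# A toy restatement of the availability-driven default selection above
# (the backend class here is made up purely for illustration):
class _ToyBackend:
    name = "toy"
    @staticmethod
    def is_available() -> bool:
        return True

_registry = {b.name: b for b in [_ToyBackend]}
_available = [b for b in _registry.values() if b.is_available()]
_default = _available[0].name if _available else None
assert _default == "toy"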
| 2
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : Tuple ) -> Optional[int]:
_A = tempfile.mkdtemp()
_A = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_A = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_A = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : Dict , **__lowerCAmelCase : int ) -> Optional[int]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : str , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Tuple , **__lowerCAmelCase : str ) -> Union[str, Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : int ) -> Optional[Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : Dict ) -> List[str]:
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = self.get_image_processor()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_A = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def snake_case_ ( self : List[Any] ) -> List[str]:
_A = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_A = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_A = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def snake_case_ ( self : str ) -> List[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = self.prepare_image_inputs()
_A = image_processor(__lowerCAmelCase , return_tensors='''np''' )
_A = processor(images=__lowerCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self : Union[str, Any] ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = processor(text=__lowerCAmelCase )
_A = tokenizer(__lowerCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def snake_case_ ( self : Optional[Any] ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(__lowerCAmelCase )
_A = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def snake_case_ ( self : str ) -> str:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = AlignProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_A = '''lower newer'''
_A = self.prepare_image_inputs()
_A = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
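# The setUp above materialises a throwaway WordPiece vocab on disk; the same
# pattern in isolation (a minimal sketch, requires the transformers package):
import os
import tempfile

from transformers import BertTokenizer

_tmp_dir = tempfile.mkdtemp()
_vocab_path = os.path.join(_tmp_dir, "vocab.txt")
with open(_vocab_path, "w", encoding="utf-8") as _f:
    _f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "low", "##er"]) + "\n")
_tok = BertTokenizer(_vocab_path)
assert _tok.tokenize("lower") == ["low", "##er"]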
| 2
| 1
|
import argparse
import copy
def UpperCamelCase_( __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :int = {}
with open(__magic_name__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_lowerCAmelCase :Any = []
_list.append([line.split()[1], line.split()[2]] )
_lowerCAmelCase :Any = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_lowerCAmelCase :str = []
_list.append([line.split()[0], line.split()[2]] )
_lowerCAmelCase :Optional[int] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Optional[Any] ):
"""simple docstring"""
with open(__magic_name__ ) as f:
_lowerCAmelCase :Dict = f.read(1 )
_lowerCAmelCase :int = start_node
_lowerCAmelCase :Any = []
_lowerCAmelCase :Tuple = start_node
_lowerCAmelCase :Dict = 0
while visiting not in first_solution:
_lowerCAmelCase :Union[str, Any] = 10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(__magic_name__ ) and k[0] not in first_solution:
_lowerCAmelCase :List[str] = k[1]
_lowerCAmelCase :Union[str, Any] = k[0]
first_solution.append(__magic_name__ )
_lowerCAmelCase :Any = distance_of_first_solution + int(__magic_name__ )
_lowerCAmelCase :str = best_node
first_solution.append(__magic_name__ )
_lowerCAmelCase :List[str] = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_lowerCAmelCase :Union[str, Any] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = []
for n in solution[1:-1]:
_lowerCAmelCase :List[str] = solution.index(__magic_name__ )
for kn in solution[1:-1]:
_lowerCAmelCase :Optional[int] = solution.index(__magic_name__ )
if n == kn:
continue
_lowerCAmelCase :Dict = copy.deepcopy(__magic_name__ )
_lowerCAmelCase :Optional[int] = kn
_lowerCAmelCase :Optional[int] = n
_lowerCAmelCase :Optional[int] = 0
for k in _tmp[:-1]:
_lowerCAmelCase :Tuple = _tmp[_tmp.index(__magic_name__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_lowerCAmelCase :Optional[int] = distance + int(i[1] )
_tmp.append(__magic_name__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_lowerCAmelCase :Optional[Any] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def UpperCamelCase_( __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
_lowerCAmelCase :List[Any] = 1
_lowerCAmelCase :int = first_solution
_lowerCAmelCase :Tuple = []
_lowerCAmelCase :List[str] = distance_of_first_solution
_lowerCAmelCase :List[Any] = solution
while count <= iters:
_lowerCAmelCase :List[str] = find_neighborhood(__magic_name__ , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = 0
_lowerCAmelCase :str = neighborhood[index_of_best_solution]
_lowerCAmelCase :Optional[int] = len(__magic_name__ ) - 1
_lowerCAmelCase :Tuple = False
while not found:
_lowerCAmelCase :Union[str, Any] = 0
while i < len(__magic_name__ ):
if best_solution[i] != solution[i]:
_lowerCAmelCase :Tuple = best_solution[i]
_lowerCAmelCase :Tuple = solution[i]
break
_lowerCAmelCase :Union[str, Any] = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_lowerCAmelCase :Optional[Any] = True
_lowerCAmelCase :Optional[Any] = best_solution[:-1]
_lowerCAmelCase :Any = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_lowerCAmelCase :List[Any] = cost
_lowerCAmelCase :int = solution
else:
_lowerCAmelCase :Dict = index_of_best_solution + 1
_lowerCAmelCase :Tuple = neighborhood[index_of_best_solution]
if len(__magic_name__ ) >= size:
tabu_list.pop(0 )
_lowerCAmelCase :Dict = count + 1
return best_solution_ever, best_cost
def UpperCamelCase_( __magic_name__ : Optional[Any]=None ):
"""simple docstring"""
_lowerCAmelCase :Optional[Any] = generate_neighbours(args.File )
_lowerCAmelCase :Tuple = generate_first_solution(
args.File , __magic_name__ )
_lowerCAmelCase :Union[str, Any] = tabu_search(
__magic_name__ , __magic_name__ , __magic_name__ , args.Iterations , args.Size , )
print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
a = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
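# The core move in find_neighborhood above is a pairwise swap of interior
# stops; stripped of the distance bookkeeping, the neighbourhood is just:
import copy

def _swap_neighbours(solution: list) -> list:
    neighbours = []
    for i in range(1, len(solution) - 1):
        for j in range(i + 1, len(solution) - 1):
            candidate = copy.deepcopy(solution)
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbours.append(candidate)
    return neighbours

assert len(_swap_neighbours(["a", "b", "c", "d", "a"])) == 3  # 3 interior pairs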
| 708
|
from __future__ import annotations
from math import pow, sqrt
def UpperCamelCase_( resistance : float , reactance : float , impedance : float ):
    """simple docstring"""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
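# Worked example for the helper above: R = 3 and X = 4 give |Z| = sqrt(9 + 16) = 5.
assert UpperCamelCase_(3, 4, 0) == {"impedance": 5.0}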
| 382
| 0
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( _a ):
"""simple docstring"""
snake_case = ["image_processor", "tokenizer"]
snake_case = "FlavaImageProcessor"
snake_case = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
A_ : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase__ , )
A_ : Union[str, Any] = kwargs.pop('''feature_extractor''' )
A_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ , lowercase__ )
A_ : Tuple = self.image_processor
def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )->Tuple:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
A_ : Optional[Any] = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if images is not None:
A_ : int = self.image_processor(
lowercase__ , return_image_mask=lowercase__ , return_codebook_pixels=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if text is not None and images is not None:
encoding.update(lowercase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Union[str, Any] = self.tokenizer.model_input_names
A_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _snake_case ( self )->List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase__ , )
return self.image_processor_class
@property
def _snake_case ( self )->Dict:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase__ , )
return self.image_processor
| 590
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : List[Any] = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
A_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
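# The gist of the _LazyModule indirection above, reduced to its essentials
# (this class is a simplified sketch, not the real implementation):
import importlib

class _TinyLazyModule:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Resolve the owning submodule on first attribute access only.
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)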
| 303
| 0
|
class EditDistance:
    '''simple docstring'''

    def __init__( self ):
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp( self , m , n ):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , worda , wordb ):
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up( self , worda , wordb ):
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]

if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    Sa = input('Enter the first string: ').strip()
    Sb = input('Enter the second string: ').strip()
    print()
    print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
    print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
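# Quick sanity check of the class above: "kitten" -> "sitting" needs 3 edits
# (substitute k->s, substitute e->i, insert g), so both strategies must agree.
_checker = EditDistance()
assert _checker.min_dist_top_down("kitten", "sitting") == 3
assert _checker.min_dist_bottom_up("kitten", "sitting") == 3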
| 714
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__UpperCAmelCase , speech_processor=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , )
def __UpperCAmelCase( self , __UpperCAmelCase = "auto" ):
if slice_size == "auto":
__A : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCAmelCase )
def __UpperCAmelCase( self ):
self.enable_attention_slicing(__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=16_000 , __UpperCAmelCase = 512 , __UpperCAmelCase = 512 , __UpperCAmelCase = 50 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , **__UpperCAmelCase , ):
__A : List[str] = self.speech_processor.feature_extractor(
__UpperCAmelCase , return_tensors="pt" , sampling_rate=__UpperCAmelCase ).input_features.to(self.device )
__A : Any = self.speech_model.generate(__UpperCAmelCase , max_length=480_000 )
__A : List[str] = self.speech_processor.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , normalize=__UpperCAmelCase )[
0
]
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__A : Optional[Any] = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__A : Dict = len(__UpperCAmelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__UpperCAmelCase )}." )
# get prompt text embeddings
__A : Optional[int] = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__A : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__A : List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__A : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
__A : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__A , __A , __A : str = text_embeddings.shape
__A : Optional[int] = text_embeddings.repeat(1 , __UpperCAmelCase , 1 )
__A : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__A : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__A : List[str]
if negative_prompt is None:
__A : Dict = [""] * batch_size
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="
F" {type(__UpperCAmelCase )}." )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__A : Any = [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
__A : int = negative_prompt
__A : int = text_input_ids.shape[-1]
__A : Any = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
__A : int = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__A : Union[str, Any] = uncond_embeddings.shape[1]
__A : List[str] = uncond_embeddings.repeat(1 , __UpperCAmelCase , 1 )
__A : int = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__A : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__A : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__A : Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__A : Tuple = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase ).to(
self.device )
else:
__A : List[Any] = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
__A : Tuple = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__A : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__A : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__A : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__A : List[str] = {}
if accepts_eta:
__A : Tuple = eta
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__A : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
# predict the noise residual
__A : List[Any] = self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__A , __A : str = noise_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__A : Union[str, Any] = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__A : int = 1 / 0.1_82_15 * latents
__A : Union[str, Any] = self.vae.decode(__UpperCAmelCase ).sample
__A : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__A : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : List[str] = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase )
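# The guidance arithmetic inside the denoising loop above, in isolation: the
# batch is doubled (unconditional + text), split, and blended by the scale.
import torch

def _cfg_blend(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

_demo = torch.cat([torch.zeros(1, 4), torch.ones(1, 4)])
assert torch.allclose(_cfg_blend(_demo, 7.5), torch.full((1, 4), 7.5))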
| 387
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowerCAmelCase_ )] )
lowercase = np.array(lowerCAmelCase_ )
lowercase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , lowerCAmelCase_ ) ) , x.transpose() ) , lowerCAmelCase_ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = (1, 2, 1)
lowercase = (1, 1, 0, 7)
lowercase = SARIMAX(
lowerCAmelCase_ , exog=lowerCAmelCase_ , order=lowerCAmelCase_ , seasonal_order=lowerCAmelCase_ )
lowercase = model.fit(disp=lowerCAmelCase_ , maxiter=600 , method="nm" )
lowercase = model_fit.predict(1 , len(lowerCAmelCase_ ) , exog=[test_match] )
return result[0]
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(lowerCAmelCase_ , lowerCAmelCase_ )
lowercase = regressor.predict(lowerCAmelCase_ )
return y_pred[0]
def UpperCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
train_user.sort()
lowercase = np.percentile(lowerCAmelCase_ , 25 )
lowercase = np.percentile(lowerCAmelCase_ , 75 )
lowercase = qa - qa
lowercase = qa - (iqr * 0.1)
return low_lim
def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
lowercase = 0
lowercase = 0
for i in list_vote:
if i > actual_result:
lowercase = not_safe + 1
else:
if abs(abs(lowerCAmelCase_ ) - abs(lowerCAmelCase_ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__lowerCamelCase : List[Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__lowerCamelCase : Tuple = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
__lowerCamelCase : Any = Normalizer().fit_transform(data_input_df.values)
# split data
__lowerCamelCase : List[str] = normalize_df[:, 2].tolist()
__lowerCamelCase : int = normalize_df[:, 0].tolist()
__lowerCamelCase : Union[str, Any] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__lowerCamelCase : str = normalize_df[:, [1, 2]].tolist()
__lowerCamelCase : Optional[int] = x[: len(x) - 1]
__lowerCamelCase : str = x[len(x) - 1 :]
# for linear regression & sarimax
__lowerCamelCase : List[Any] = total_date[: len(total_date) - 1]
__lowerCamelCase : Tuple = total_user[: len(total_user) - 1]
__lowerCamelCase : Tuple = total_match[: len(total_match) - 1]
__lowerCamelCase : Dict = total_date[len(total_date) - 1 :]
__lowerCamelCase : str = total_user[len(total_user) - 1 :]
__lowerCamelCase : Any = total_match[len(total_match) - 1 :]
# voting system with forecasting
__lowerCamelCase : Any = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
print(f"Today's data is {not_str}safe.")
| 310
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors( n ):
    """simple docstring"""
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit = 1_0000 ):
    """simple docstring"""
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
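# Quick check of the proper-divisor helper above with the classic amicable pair:
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220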
| 310
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
def all_chars_unique(input_str: str) -> bool:
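    """
    Check whether all characters in ``input_str`` are distinct, using one big
    integer as a bitmap over Unicode code points. Doctests added so that the
    ``doctest.testmod()`` call below actually exercises something:

    >>> all_chars_unique("abcdef")
    True
    >>> all_chars_unique("abcdea")
    False
    """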
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
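

# Minimal usage sketch (not from the original file): `unet` and the tensor
# shapes are hypothetical stand-ins, showing how the churned
# predictor step above is meant to be driven.
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for i in range(len(scheduler.schedule)):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat / 2, sigma_hat / 2).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample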
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
return ordered_rotations[idx_original_string]
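

# Illustrative round trip, computed by hand:
#   bwt_transform("banana") -> {"bwt_string": "nnbaaa", "idx_original_string": 3}
#   reverse_bwt("nnbaaa", 3) -> "banana"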
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
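

# Illustrative sketch: a 480x640 image resized toward 384x384 with
# keep_aspect_ratio=True and multiple=32 fits the height (scale 0.8) and
# yields (384, 512), since 0.8 * 480 and 0.8 * 640 are already multiples of 32.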
class DPTImageProcessor(BaseImageProcessor):

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
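

# Hedged usage sketch (`pil_image` is a stand-in for any PIL image):
#   image_processor = DPTImageProcessor(ensure_multiple_of=32)
#   inputs = image_processor(images=pil_image, return_tensors="pt")
#   inputs["pixel_values"]  # torch tensor of shape (1, 3, H, W)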
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
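

# Illustrative check of the helper: 2**10 = 1024, so _modexpt(2, 10, 1000) == 24.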
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
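

# Hedged usage sketch: byte-level ids are ord(byte) + 3, since ids 0-2 are
# reserved for the pad, eos and unk tokens defined above.
#   tokenizer = ByT5Tokenizer()
#   tokenizer("hi").input_ids  # -> [107, 108, 1]  (ord("h")+3, ord("i")+3, eos)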
"""Heap's iterative algorithm for generating all permutations of a list."""
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
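

# Illustrative check: heaps([1, 2, 3]) returns all 3! = 6 permutations,
# each as a tuple, starting with (1, 2, 3).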
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
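

# Illustrative check: length_conversion(1, "kilometer", "meter") == 1000.0
# (exponent 3 - 0 = 3, so the value is scaled by 10**3).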
if __name__ == "__main__":
from doctest import testmod
testmod()
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
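

# For inputs (1, 1) the sum bit is 0 and the carry bit is 1, so the counts
# printed below should be concentrated on the bitstring "10" (carry, sum).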
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
def __call__( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = self.conv_in(lowercase )
lowerCamelCase_ = nn.silu(lowercase )
for block in self.blocks:
lowerCamelCase_ = block(lowercase )
lowerCamelCase_ = nn.silu(lowercase )
lowerCamelCase_ = self.conv_out(lowercase )
return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
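# Shape-level usage sketch (illustrative, assuming the surrounding diffusers modules import
# cleanly): parameters come from init_weights, and apply() runs the ControlNet forward pass.
# The default config is heavy; smaller block_out_channels are typical for quick tests.
#
#     controlnet = FlaxControlNetModel()
#     params = controlnet.init_weights(jax.random.PRNGKey(0))
#     down_res, mid_res = controlnet.apply(
#         {"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond,
#         return_dict=False,
#     )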
| 313
| 1
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output class for Semantic Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # class name reconstructed from the file contents (rescale + pad-to-multiple preprocessing)
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # pad each spatial dimension up to the next multiple of `size`
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
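# Usage sketch (illustrative): the processor only rescales and pads each side up to the next
# multiple of `pad_size`, so a 17x30 input with pad_size=8 comes out 24x32.
#
#     import numpy as np
#     processor = Swin2SRImageProcessor(pad_size=8)
#     batch = processor(np.zeros((17, 30, 3), dtype=np.uint8), return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 24, 32)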
| 78
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
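# Behavior note (added for illustration): with the _LazyModule pattern above, importing the
# package itself is cheap; heavy submodules load only on first attribute access, e.g.:
#
#     from transformers.models.mobilevit import MobileViTConfig  # imports configuration only
#     # MobileViTModel / TFMobileViTModel pull in torch / tensorflow lazily when first accessed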
| 719
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319
| 0
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
| 118
|
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """Enum that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
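# Invocation sketch (illustrative; the script filename is hypothetical): convert a single timm
# checkpoint, or all supported ResNets when --model_name is omitted.
#
#     python convert_resnet_to_pytorch.py --model_name resnet50 \
#         --pytorch_dump_folder_path ./converted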
| 146
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, expected_outputs)
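# Running note (added; the test path is illustrative): as a standard transformers test module
# this is driven by pytest, e.g.
#
#     RUN_SLOW=0 pytest tests/models/gpt_neox_japanese -k "GPTNeoXModelJapaneseTest" -x
#
# The @slow generation test only runs with RUN_SLOW=1 since it downloads the 2.7B checkpoint.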
| 146
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
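# Formatting sketch (illustrative; `tok` is a hypothetical tokenizer instance): a sequence pair
# is rendered as [CLS] A [SEP] B [SEP], with token_type_ids 0 for the first segment, 1 for the second.
#
#     ids = tok.build_inputs_with_special_tokens([5, 6], [7])  # -> [cls_id, 5, 6, sep_id, 7, sep_id]
#     tok.create_token_type_ids_from_sequences([5, 6], [7])    # -> [0, 0, 0, 0, 1, 1]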
| 70
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 1
|
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow info/warning logs
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
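# Extension sketch (added, optional): per-device details can be appended with existing torch APIs:
#
#     if torch.cuda.is_available():
#         for i in range(torch.cuda.device_count()):
#             print(f"GPU {i}:", torch.cuda.get_device_name(i))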
| 705
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
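# Usage sketch (illustrative; all argument names besides the two QA-specific hooks follow the
# standard Seq2SeqTrainer constructor): post_process_function maps generated ids back to answers.
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
#         eval_examples=eval_examples, tokenizer=tokenizer, data_collator=data_collator,
#         compute_metrics=compute_metrics, post_process_function=post_processing_function,
#     )
#     metrics = trainer.evaluate(max_length=30, num_beams=4)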
| 158
| 0
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 83
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(self, vocab_size=100_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # note: the misspelled "memorry" kwarg key is kept on purpose for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 609
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        pass
| 619
|
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
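# Example (illustrative): carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
# returns ("intrinsic_conc", 50.0), since n_i = sqrt(n * p) = sqrt(25 * 100).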
if __name__ == "__main__":
import doctest
doctest.testmod()
| 619
| 1
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
A__ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 166
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
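# Minimal usage sketch (illustrative): pair the model config with its ONNX config
# to inspect the dynamic axes used at export time.
#
#   config = LongformerConfig()
#   onnx_config = LongformerOnnxConfig(config)
#   print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask', 'global_attention_mask']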
| 298
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case : Optional[int] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
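# For example (illustrative): a single image becomes [[image]], a flat list of
# frames becomes [frames], and a list of videos (lists of frames) is returned as-is.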
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
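# Minimal usage sketch (illustrative; the class name and the exact output shape
# depend on the configured crop size and the number of frames supplied):
#
#   processor = VivitImageProcessor()
#   batch = processor(list_of_videos, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # e.g. (num_videos, num_frames, 3, 224, 224)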
| 657
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
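# Example (illustrative): download_prompt(None, "my-agent", mode="chat") fetches
# chat_prompt_template.txt from the default prompts repo, while passing a literal
# prompt (any string containing whitespace) returns it unchanged.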
| 657
| 1
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : Dict = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
A_ : Optional[Any] = {'allegro/herbert-base-cased': 514}
A_ : Dict = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 57
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
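# Example (illustrative): ["--num_proc", "4", "--name", "squad"] becomes
# {"num_proc": "4", "name": "squad"}.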
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 221
| 0
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a TensorFlow Datasets dataset into a HuggingFace Datasets dataset.
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 378
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride)

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 378
| 1
|
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
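# Example (illustrative): pollard_rho(8051) returns a nontrivial factor of
# 8051 = 83 * 97 (which factor depends on the seed and step), while
# pollard_rho(13) returns None because 13 is prime.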
if __name__ == "__main__":
import argparse
A__ = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
A__ = parser.parse_args()
A__ = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
A__ = args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
| 252
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]):
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix):
        return self + (-another)

    def __mul__(self, another: int | float | Matrix):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
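    # Sherman-Morrison: when this matrix holds A^(-1), the method above computes
    # (A + uv^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u).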
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
    test2()
| 252
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowerCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675
|
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
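# Example (illustrative): hamming(6) returns [1, 2, 3, 4, 5, 6]; 7 is skipped
# because it has a prime factor other than 2, 3 and 5, so hamming(7) ends in 8.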
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 583
| 0
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
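

if __name__ == "__main__":
    # Hedged demo (not part of the original helpers): profiles a small matmul.
    # Like the helpers above, it assumes at least one CUDA device, since
    # `start_measure` unconditionally resets CUDA peak-memory statistics.
    def heavy_function():
        a = torch.randn(1024, 1024, device="cuda")
        b = torch.randn(1024, 1024, device="cuda")
        return a @ b

    start = start_measure()
    heavy_function()
    measures = end_measure(start)
    log_measures(measures, "matmul benchmark")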
# ---------------------------------------------------------------------------
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
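
# Hedged CLI sketch (assuming the script is saved as prepare_chinese_ref.py; the
# resource paths are just the argparse defaults above):
#
#     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt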
# ---------------------------------------------------------------------------
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
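
# Hedged CLI sketch (all three paths are hypothetical placeholders):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin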
# ---------------------------------------------------------------------------
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in ascending order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, using only n multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
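
# Worked check (a hedged illustration, not part of the original script): for
# poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10, Horner's rule factors
#     7.0*x**4 + 9.3*x**3 + 5.0*x**2 + 0.0*x + 0.0
# as  ((((7.0)*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0 = 79800.0,
# matching the direct evaluation above.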
# ---------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """This is a general feature extraction class for speech recognition."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it to a dict of lists so this
        # method can also serve as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
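
# Hedged usage sketch (illustrative; inside the library this would be a circular
# import). `Wav2Vec2FeatureExtractor` is one concrete subclass of
# `SequenceFeatureExtractor` and inherits `pad` as-is:
#
#     import numpy as np
#     from transformers import Wav2Vec2FeatureExtractor
#
#     extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
#     batch = extractor.pad(
#         {"input_values": [np.zeros(8), np.zeros(5)]},
#         padding="longest",
#         return_attention_mask=True,
#         return_tensors="np",
#     )
#     print(batch["input_values"].shape)      # (2, 8)
#     print(batch["attention_mask"].sum(-1))  # [8 5]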
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs I've run the model on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(ac1) == type(ac2)
            if isinstance(ac1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(ac1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
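
# Hedged usage sketch (illustrative; `toy_layer` is a hypothetical stand-in for
# an expensive module). `chunk_layer` runs the callable over `chunk_size`-row
# slices of the flattened batch dimensions, then reassembles the full output:
#
#     def toy_layer(x: torch.Tensor) -> torch.Tensor:
#         return x * 2
#
#     full = torch.arange(12.0).view(3, 4)
#     chunked = chunk_layer(toy_layer, {"x": full}, chunk_size=2, no_batch_dims=1)
#     assert torch.equal(chunked, toy_layer(full))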
# ---------------------------------------------------------------------------
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
SCREAMING_SNAKE_CASE : Union[str, Any] = "Create a default config file for Accelerate with only a few flags set."
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[Any]="no" , SCREAMING_SNAKE_CASE_ : str = default_json_config_file , SCREAMING_SNAKE_CASE_ : bool = False ):
"""simple docstring"""
a_ : Optional[Any] = Path(SCREAMING_SNAKE_CASE_ )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
a_ : Tuple = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
a_ : str = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
a_ : Union[str, Any] = torch.cuda.device_count()
a_ : Any = num_gpus
a_ : Union[str, Any] = False
if num_gpus > 1:
a_ : str = """MULTI_GPU"""
else:
a_ : List[str] = """NO"""
elif is_xpu_available() and use_xpu:
a_ : List[Any] = torch.xpu.device_count()
a_ : List[str] = num_xpus
a_ : List[Any] = False
if num_xpus > 1:
a_ : List[str] = """MULTI_XPU"""
else:
a_ : Union[str, Any] = """NO"""
elif is_npu_available():
a_ : Tuple = torch.npu.device_count()
a_ : Union[str, Any] = num_npus
a_ : List[Any] = False
if num_npus > 1:
a_ : List[str] = """MULTI_NPU"""
else:
a_ : Optional[int] = """NO"""
else:
a_ : List[str] = 0
a_ : Optional[int] = True
a_ : Optional[Any] = 1
a_ : List[str] = """NO"""
a_ : int = ClusterConfig(**SCREAMING_SNAKE_CASE_ )
config.to_json_file(SCREAMING_SNAKE_CASE_ )
return path
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
a_ : Dict = parser.add_parser("""default""" , parents=SCREAMING_SNAKE_CASE_ , help=SCREAMING_SNAKE_CASE_ , formatter_class=SCREAMING_SNAKE_CASE_ )
parser.add_argument(
"""--config_file""" , default=SCREAMING_SNAKE_CASE_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , dest="""save_location""" , )
parser.add_argument(
"""--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=SCREAMING_SNAKE_CASE_ , help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
return parser
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
"""simple docstring"""
a_ : int = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
# ---------------------------------------------------------------------------
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
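
# Hedged usage sketch (illustrative; assumes the `resnet` config is registered
# in CONFIG_MAPPING, as it is in a standard transformers install):
#
#     config = DeformableDetrConfig(use_timm_backbone=False)
#     print(config.hidden_size)  # 256, aliased to `d_model` via the property above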
# ---------------------------------------------------------------------------
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
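
# Hedged usage sketch (illustrative; weights are randomly initialized, no
# pretrained checkpoint is loaded, and the tiny sizes are arbitrary):
#
#     config = RobertaSeriesConfig(
#         vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, project_dim=16
#     )
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=torch.tensor([[5, 6, 7]]))
#     print(out.projection_state.shape)  # torch.Size([1, 3, 16])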
# ---------------------------------------------------------------------------
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
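
# Hedged usage sketch (illustrative; the random array stands in for a real image):
#
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     processor = DPTImageProcessor()
#     pixel_values = processor(images=image, return_tensors="np")["pixel_values"]
#     print(pixel_values.shape)  # (1, 3, 384, 384)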
# ---------------------------------------------------------------------------
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : Optional[int] = batch_size
__SCREAMING_SNAKE_CASE : Any = seq_length
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
__SCREAMING_SNAKE_CASE : Optional[int] = use_labels
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : str = hidden_size
__SCREAMING_SNAKE_CASE : int = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = type_vocab_size
__SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : str = num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = scope
__SCREAMING_SNAKE_CASE : Optional[Any] = self.vocab_size - 1
def _snake_case ( self ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)

        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
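For reference, a minimal sketch (not part of the original test file) of reproducing the greedy generation above through the public API; it assumes the "openai-gpt" checkpoint is reachable on the Hugging Face Hub:

from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

# Greedy decoding of the same prompt used in the integration test above.
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))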
| 158
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
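For context, a minimal sketch of driving the scheduler under test outside the test harness; the noise-prediction model here is a zero stand-in, not part of the original file:

import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy latent
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for a model's noise prediction
    sample = scheduler.step(residual, t, sample).prev_sample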
| 691
|
'''simple docstring'''
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
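A quick determinism check, assuming the class above: two generators built with the same parameters and seed must produce identical streams.

lcg_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
lcg_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
assert [lcg_a.next_number() for _ in range(5)] == [lcg_b.next_number() for _ in range(5)]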
| 691
| 1
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
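Illustrative usage, assuming the script above is loadable by name (note that datasets.load_metric is deprecated in recent releases of the library):

import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}", "10"], predictions=["1/2", "5"])
print(results)  # {'accuracy': 0.5}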
| 95
|
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
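Illustrative wiring, mirroring how this trainer is typically constructed in the QA example scripts; every name below (model, training_args, the datasets, and the helper functions) is a hypothetical placeholder:

trainer = QuestionAnsweringSeq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    eval_examples=eval_examples,
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate(max_length=64, num_beams=4)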
| 446
| 0
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
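Quick sanity checks, assuming the functions above: the first pair of consecutive integers with two distinct prime factors each is (14, 15), and the first such triple is (644, 645, 646).

assert solution(2) == 14
assert solution(3) == 644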
| 718
|
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the ELU activation element-wise: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
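Example, assuming the function above: negative inputs are squashed toward -alpha while positive inputs pass through unchanged.

print(exponential_linear_unit(np.array([-5.0, 0.0, 2.0]), alpha=1.0))  # [-0.9932..., 0., 2.]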
| 621
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
@slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
@slow
def a ( self : List[str] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelForPreTraining.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelForPreTraining.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def a ( self : Dict ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = AutoModelForCausalLM.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def a ( self : List[str] ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelWithLMHead.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = AutoModelForMaskedLM.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def a ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
__lowerCAmelCase , output_loading_info=__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
def a ( self : str ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase , from_pt=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase , from_tf=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 309
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 309
| 1
|
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of the given string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
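A minimal cross-check, assuming the function above: for ASCII input the result should match zlib's C implementation applied to the same bytes.

import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")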
| 712
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on some audio from the hub
def prepare_audio():
    audio_file = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(audio_file)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 292
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
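Hypothetical usage sketch; it requires a real SentencePiece model file on disk, so the path below is a placeholder:

tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
ids = tokenizer.build_inputs_with_special_tokens([5, 6, 7])
print(ids)  # [CLS id, 5, 6, 7, SEP id]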
| 192
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence is guaranteed for all complex numbers with an absolute value
        # greater than 2 (checked here as |z|^2 > 4)
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
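Two quick sanity checks, assuming the helpers above: the origin never diverges, while a point far outside the set escapes on the first step.

assert get_distance(0, 0, 50) == 1.0
assert get_distance(2, 2, 50) == 0.0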
| 102
| 0
|
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
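Example usage, assuming the functions above: a 1x1 board is trivially solvable, while 2x2 through 4x4 boards raise ValueError because no open tour exists on them.

print(open_knight_tour(1))  # [[1]]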
| 109
|
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCAmelCase__ ( ) -> str:
assert _test_patching.open is open
A = '__test_patch_submodule_builtin_mock__'
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , 'open' , lowerCamelCase__ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCAmelCase__ ( ) -> List[Any]:
# pandas.read_csv is not present in _test_patching
A = '__test_patch_submodule_missing_mock__'
with patch_submodule(_test_patching , 'pandas.read_csv' , lowerCamelCase__ ):
pass
def lowerCAmelCase__ ( ) -> Union[str, Any]:
# builtins should always be mocked even if they're not in the globals,
# in case they're loaded at some point
A = '__test_patch_submodule_missing_builtin_mock__'
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , 'len' , lowerCamelCase__ ) is None
with patch_submodule(_test_patching , 'len' , lowerCamelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCAmelCase__ ( ) -> Union[str, Any]:
A = '__test_patch_submodule_start_and_stop_mock__'
A = patch_submodule(_test_patching , 'open' , lowerCamelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCAmelCase__ ( ) -> int:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
A = '__test_patch_submodule_successive_join__'
A = '__test_patch_submodule_successive_dirname__'
A = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.rename' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , 'os.rename' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.join' , lowerCamelCase__ ):
with patch_submodule(_test_patching , 'os.path.dirname' , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCAmelCase__ ( ) -> Optional[Any]:
A = '__test_patch_submodule_doesnt_exist_mock__'
with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , lowerCamelCase__ ):
pass
with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , lowerCamelCase__ ):
pass
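# Hedged analogue (an assumption-labeled illustration, not datasets code): the same
# start()/stop() patching pattern exercised above, shown with the standard-library
# unittest.mock.patch so it runs without the datasets test helpers.
from unittest import mock
import os
_patcher = mock.patch("os.getcwd", return_value="/tmp")
_patcher.start()
assert os.getcwd() == "/tmp"  # patched
_patcher.stop()               # restored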
| 109
| 1
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 116
|
'''simple docstring'''
def lowerCamelCase__ ( __lowercase ):
if not isinstance(__lowercase , __lowercase ):
snake_case : int = F'''Input value of [number={number}] must be an integer'''
raise TypeError(__lowercase )
if number < 0:
return False
snake_case : Optional[int] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
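# Hedged sketch: the routine above tests whether a number is automorphic, i.e. whether
# its square ends in the number itself (5**2 = 25, 76**2 = 5776). Self-contained cross-check:
assert all(str(n * n).endswith(str(n)) for n in (1, 5, 6, 25, 76, 376, 625))
assert not str(7 * 7).endswith("7")  # 49 does not end in 7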
| 116
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_UpperCAmelCase = random.Random()
def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Any=1.0 ,__lowercase : str=None ,__lowercase : Dict=None ):
'''simple docstring'''
if rng is None:
A_ : Any = global_rng
A_ : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=7 , lowercase=4_0_0 , lowercase=2_0_0_0 , lowercase=1 , lowercase=0.0 , lowercase=1_6_0_0_0 , lowercase=True , lowercase=8_0 , lowercase=1_6 , lowercase=6_4 , lowercase="hann_window" , lowercase=8_0 , lowercase=7_6_0_0 , lowercase=1E-10 , lowercase=True , ):
"""simple docstring"""
A_ : str = parent
A_ : Tuple = batch_size
A_ : int = min_seq_length
A_ : Optional[int] = max_seq_length
A_ : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A_ : int = feature_size
A_ : int = padding_value
A_ : Dict = sampling_rate
A_ : Tuple = do_normalize
A_ : int = num_mel_bins
A_ : Dict = hop_length
A_ : Optional[int] = win_length
A_ : Tuple = win_function
A_ : Dict = fmin
A_ : Optional[Any] = fmax
A_ : List[str] = mel_floor
A_ : List[str] = return_attention_mask
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self , lowercase=False , lowercase=False ):
"""simple docstring"""
def _flatten(lowercase ):
return list(itertools.chain(*_snake_case ) )
if equal_length:
A_ : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A_ : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A_ : List[Any] = [np.asarray(_snake_case ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self , lowercase=False , lowercase=False ):
"""simple docstring"""
if equal_length:
A_ : int = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A_ : List[str] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A_ : int = [np.asarray(_snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(_snake_case , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_snake_case , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A_ : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ : Any = [np.asarray(_snake_case ) for speech_input in speech_inputs]
# Test not batched input
A_ : str = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
A_ : Any = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test batched
A_ : Tuple = feat_extract(_snake_case , return_tensors='np' ).input_values
A_ : Tuple = feat_extract(_snake_case , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ : Any = ['longest', 'max_length', 'do_not_pad']
A_ : List[Any] = [None, 1_6_0_0, None]
for max_length, padding in zip(_snake_case , _snake_case ):
A_ : int = feat_extract(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors='np' )
A_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : List[str] = range(8_0_0 , 1_4_0_0 , 2_0_0 )
A_ : str = [floats_list((1, x) )[0] for x in lengths]
A_ : int = ['longest', 'max_length', 'do_not_pad']
A_ : int = [None, 1_6_0_0, None]
for max_length, padding in zip(_snake_case , _snake_case ):
A_ : List[str] = feat_extract(_snake_case , max_length=_snake_case , padding=_snake_case )
A_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ : str = feat_extract(
_snake_case , truncation=_snake_case , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
A_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Dict = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ : Any = feat_extract(
_snake_case , truncation=_snake_case , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
A_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
A_ : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ : Union[str, Any] = feat_extract(
_snake_case , truncation=_snake_case , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
A_ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ : Tuple = np.random.rand(1_0_0 ).astype(np.floataa )
A_ : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A_ : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A_ : int = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A_ : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
A_ : List[Any] = [np.asarray(_snake_case ) for speech_input in speech_inputs]
# Test feature size
A_ : Tuple = feature_extractor(audio_target=_snake_case , padding=_snake_case , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
A_ : int = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
A_ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test batched
A_ : Dict = feature_extractor(_snake_case , return_tensors='np' ).input_values
A_ : Optional[Any] = feature_extractor(_snake_case , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
A_ : Any = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A_ : str = np.asarray(_snake_case )
A_ : List[Any] = feature_extractor(_snake_case , return_tensors='np' ).input_values
A_ : Tuple = feature_extractor(_snake_case , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_snake_case , _snake_case ):
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.feat_extract_tester.prepare_inputs_for_target()
A_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
A_ : List[Any] = feat_extract.model_input_names[0]
A_ : str = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_snake_case ) == len(_snake_case ) for x, y in zip(_snake_case , processed_features[input_name] ) ) )
A_ : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case )
A_ : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
A_ : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A_ : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_snake_case )
A_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
A_ : Tuple = feat_extract.model_input_names[0]
A_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
A_ : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
A_ : Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
A_ : Tuple = feat_extract.model_input_names[0]
A_ : Tuple = BatchFeature({input_name: speech_inputs} )
A_ : Union[str, Any] = feat_extract.num_mel_bins # hack!
A_ : Dict = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )[input_name]
A_ : Union[str, Any] = feat_extract.pad(_snake_case , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.feat_extract_dict
A_ : str = True
A_ : List[str] = self.feature_extraction_class(**_snake_case )
A_ : int = self.feat_extract_tester.prepare_inputs_for_target()
A_ : Optional[Any] = [len(_snake_case ) for x in speech_inputs]
A_ : Dict = feat_extract.model_input_names[0]
A_ : int = BatchFeature({input_name: speech_inputs} )
A_ : List[str] = feat_extract.num_mel_bins # hack!
A_ : str = feat_extract.pad(_snake_case , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _snake_case )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.feat_extract_dict
A_ : Optional[int] = True
A_ : Any = self.feature_extraction_class(**_snake_case )
A_ : Any = self.feat_extract_tester.prepare_inputs_for_target()
A_ : str = [len(_snake_case ) for x in speech_inputs]
A_ : str = feat_extract.model_input_names[0]
A_ : int = BatchFeature({input_name: speech_inputs} )
A_ : Any = min(_snake_case )
A_ : Union[str, Any] = feat_extract.num_mel_bins # hack!
A_ : List[str] = feat_extract.pad(
_snake_case , padding='max_length' , max_length=_snake_case , truncation=_snake_case , return_tensors='np' )
self.assertIn('attention_mask' , _snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
from datasets import load_dataset
A_ : int = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
A_ : Dict = ds.sort('id' ).select(range(_snake_case ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = torch.tensor(
[2.38_04E-03, 2.07_52E-03, 1.98_36E-03, 2.10_57E-03, 1.61_74E-03,
3.05_18E-04, 9.15_53E-05, 3.35_69E-04, 9.76_56E-04, 1.83_11E-03,
2.01_42E-03, 2.10_57E-03, 1.73_95E-03, 4.57_76E-04, -3.96_73E-04,
4.57_76E-04, 1.00_71E-03, 9.15_53E-05, 4.88_28E-04, 1.15_97E-03,
7.32_42E-04, 9.46_04E-04, 1.80_05E-03, 1.83_11E-03, 8.85_01E-04,
4.27_25E-04, 4.88_28E-04, 7.32_42E-04, 1.09_86E-03, 2.10_57E-03] )
# fmt: on
A_ : str = self._load_datasamples(1 )
A_ : Union[str, Any] = SpeechTaFeatureExtractor()
A_ : Tuple = feature_extractor(_snake_case , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , _snake_case , atol=1E-6 ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
A_ : Union[str, Any] = self._load_datasamples(1 )
A_ : Optional[Any] = SpeechTaFeatureExtractor()
A_ : Any = feature_extractor(audio_target=_snake_case , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , _snake_case , atol=1E-4 ) )
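# Hedged usage sketch (kept as comments: the released transformers class is
# SpeechT5FeatureExtractor; the "SpeechTa" spelling above is the dataset's obfuscation):
# from transformers import SpeechT5FeatureExtractor
# fe = SpeechT5FeatureExtractor()
# features = fe([0.0] * 16000, sampling_rate=16000, return_tensors="np")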
| 708
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''wavlm'''
def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A_ : List[Any] = hidden_size
A_ : Tuple = feat_extract_norm
A_ : Dict = feat_extract_activation
A_ : Optional[Any] = list(lowercase )
A_ : Union[str, Any] = list(lowercase )
A_ : List[str] = list(lowercase )
A_ : str = conv_bias
A_ : Tuple = num_buckets
A_ : Union[str, Any] = max_bucket_distance
A_ : int = num_conv_pos_embeddings
A_ : str = num_conv_pos_embedding_groups
A_ : str = len(self.conv_dim )
A_ : Tuple = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[Any] = num_attention_heads
A_ : str = hidden_dropout
A_ : Optional[int] = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Optional[int] = feat_proj_dropout
A_ : List[Any] = final_dropout
A_ : Union[str, Any] = layerdrop
A_ : Dict = layer_norm_eps
A_ : Optional[Any] = initializer_range
A_ : str = num_ctc_classes
A_ : Any = vocab_size
A_ : str = do_stable_layer_norm
A_ : int = use_weighted_layer_sum
A_ : int = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : List[str] = apply_spec_augment
A_ : Optional[Any] = mask_time_prob
A_ : int = mask_time_length
A_ : Any = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Tuple = mask_feature_length
# parameters for pretraining with codevector quantized representations
A_ : int = num_codevectors_per_group
A_ : Any = num_codevector_groups
A_ : List[Any] = contrastive_logits_temperature
A_ : Optional[Any] = num_negatives
A_ : Optional[Any] = codevector_dim
A_ : int = proj_codevector_dim
A_ : int = diversity_loss_weight
# ctc loss
A_ : Union[str, Any] = ctc_loss_reduction
A_ : Any = ctc_zero_infinity
# adapter
A_ : int = add_adapter
A_ : Optional[Any] = adapter_kernel_size
A_ : Optional[int] = adapter_stride
A_ : Dict = num_adapter_layers
A_ : str = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A_ : Tuple = list(lowercase )
A_ : Optional[Any] = list(lowercase )
A_ : Dict = list(lowercase )
A_ : Dict = xvector_output_dim
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 70
| 0
|
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : Any ):
'''simple docstring'''
UpperCamelCase__ = (boundary[1] - boundary[0]) / steps
UpperCamelCase__ = boundary[0]
UpperCamelCase__ = boundary[1]
UpperCamelCase__ = make_points(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
UpperCamelCase__ = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = a + h
while x < (b - h):
yield x
UpperCamelCase__ = x + h
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ): # enter your function here
'''simple docstring'''
UpperCamelCase__ = (x - 0) * (x - 0)
return y
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = 0.0 # Lower bound of integration
UpperCamelCase__ = 1.0 # Upper bound of integration
UpperCamelCase__ = 10.0 # define number of steps or resolution
UpperCamelCase__ = [a, b] # define boundary of integration
UpperCamelCase__ = method_a(UpperCamelCase__, UpperCamelCase__ )
print(F"""y = {y}""" )
if __name__ == "__main__":
main()
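# Hedged self-contained check (helper names here are illustrative, not from the snippet):
# the trapezoidal rule above applied to f(x) = x**2 on [0, 1] with 10 steps gives
# 0.05 * (f(0) + f(1)) + 0.1 * (f(0.1) + ... + f(0.9)) = 0.335, close to the exact 1/3.
def _trapezoid(f, a, b, steps):
    h = (b - a) / steps
    total = (f(a) + f(b)) / 2.0  # endpoints get half weight
    for k in range(1, steps):
        total += f(a + k * h)    # interior points get full weight
    return total * h
assert abs(_trapezoid(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-9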
| 240
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : List[str] ):
UpperCamelCase__ = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
UpperCamelCase__ = dict(zip(_a , range(len(_a ) ) ) )
UpperCamelCase__ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
UpperCamelCase__ = {'''unk_token''': '''<unk>'''}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
UpperCamelCase__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
UpperCamelCase__ = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def A_ ( self : Optional[Any] , **_a : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def A_ ( self : Dict , **_a : List[Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def A_ ( self : Union[str, Any] , **_a : Union[str, Any] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def A_ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def A_ ( self : List[str] ):
UpperCamelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def A_ ( self : int ):
UpperCamelCase__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase__ = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
UpperCamelCase__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(_a , return_tensors='''np''' )
UpperCamelCase__ = processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = processor(text=_a )
UpperCamelCase__ = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : Any ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def A_ ( self : Dict ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(_a )
UpperCamelCase__ = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=_a , image_processor=_a )
UpperCamelCase__ = '''lower newer'''
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 240
| 1
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = sorted(zip(__UpperCamelCase , __UpperCamelCase ) , key=lambda __UpperCamelCase : x[0] / x[1] , reverse=__UpperCamelCase )
__A , __A = [i[0] for i in r], [i[1] for i in r]
__A = list(accumulate(__UpperCamelCase ) )
__A = bisect(__UpperCamelCase , __UpperCamelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
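# Hedged usage sketch (function name and argument order are assumed from the algorithm,
# re-stated self-contained): values [60, 100, 120], weights [10, 20, 30] and capacity 50
# give the classic fractional-knapsack greedy optimum, 240.0.
from bisect import bisect as _bisect
from itertools import accumulate as _accumulate
def _frac_knapsack(value, weight, capacity, n):
    # sort items by value/weight ratio, descending
    r = sorted(zip(value, weight), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(_accumulate(wt))        # running weight totals
    k = _bisect(acc, capacity)         # how many items fit whole
    if k == 0:
        return 0
    if k != n:
        return sum(vl[:k]) + (capacity - acc[k - 1]) * vl[k] / wt[k]
    return sum(vl[:k])
assert _frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0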
| 703
|
"""simple docstring"""
lowercase_ = 8.31_4462 # Unit - J mol-1 K-1
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
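# Hedged sketch: solving PV = nRT for pressure with n = 1 mol, T = 300 K,
# V = 0.0248 m^3 and R = 8.314462 J mol^-1 K^-1 as above -- roughly one atmosphere.
_R = 8.314462
_pressure = 1 * 300 * _R / 0.0248  # Pa
assert abs(_pressure - 100_578.2) < 1.0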
| 215
| 0
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase : Dict = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase__ ( _a : List[str] ):
if isinstance(lowerCAmelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
snake_case_ : Any = [image]
snake_case_ : Union[str, Any] = [trans(img.convert("RGB" ) ) for img in image]
snake_case_ : List[str] = torch.stack(lowerCAmelCase_ )
return image
class UpperCAmelCase_ ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case_ : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
# get the original timestep using init_timestep
snake_case_ : Optional[int] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
snake_case_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
snake_case_ : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Any:
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_ )}''' )
snake_case_ : List[str] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
snake_case_ : Dict = init_latents.shape
snake_case_ : str = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
print("add noise to latents at timestep" , lowerCAmelCase_ )
snake_case_ : str = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ : Optional[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0.8 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(lowerCAmelCase_ )
# 2. Preprocess image
snake_case_ : Union[str, Any] = preprocess(lowerCAmelCase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCAmelCase_ , device=self.device )
snake_case_ , snake_case_ : Optional[int] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , self.device )
snake_case_ : Optional[Any] = timesteps[:1].repeat(lowerCAmelCase_ )
# 4. Prepare latent variables
snake_case_ : Union[str, Any] = self.prepare_latents(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.unet.dtype , self.device , lowerCAmelCase_ )
snake_case_ : List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCAmelCase_ ):
# 1. predict noise model_output
snake_case_ : Optional[int] = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case_ : List[str] = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , eta=lowerCAmelCase_ , use_clipped_model_output=lowerCAmelCase_ , generator=lowerCAmelCase_ , ).prev_sample
snake_case_ : Any = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Tuple = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCAmelCase_ )
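# Hedged usage sketch (kept as comments; the class and argument names below are assumed
# de-obfuscations, not verified against a released checkpoint):
# pipe = DDIMImg2ImgPipeline(unet=my_unet, scheduler=my_scheduler)  # hypothetical names
# out = pipe(image=my_pil_image, strength=0.8, num_inference_steps=50)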
| 568
|
from __future__ import annotations
def a_ ( lowerCAmelCase_ : list[float] ):
if len(lowerCAmelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
__lowerCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
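# Hedged sketch: the check above is the generalized polygon inequality -- the longest
# side must be strictly shorter than the sum of the rest. Self-contained examples:
def _can_form_polygon(nums):
    sides = sorted(nums)
    return sides[-1] < sum(sides[:-1])
assert _can_form_polygon([3, 4, 5]) and not _can_form_polygon([1, 1, 3])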
| 53
| 0
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase : List[str] = []
if len(lowercase_ ) == 1:
return [nums.copy()]
for _ in range(len(lowercase_ ) ):
_UpperCamelCase : Dict = nums.pop(0 )
_UpperCamelCase : Any = permute(lowercase_ )
for perm in permutations:
perm.append(lowercase_ )
result.extend(lowercase_ )
nums.append(lowercase_ )
return result
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
def backtrack(lowercase_ ):
if start == len(lowercase_ ) - 1:
output.append(nums[:] )
else:
for i in range(lowercase_ ,len(lowercase_ ) ):
_UpperCamelCase : Tuple = nums[i], nums[start]
backtrack(start + 1 )
_UpperCamelCase : Any = nums[i], nums[start] # backtrack
_UpperCamelCase : str = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
lowerCamelCase__ = permutea([1, 2, 3])
print(res)
doctest.testmod()
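# Hedged cross-check (illustrative only): both routines above enumerate all n!
# orderings of distinct items; itertools.permutations agrees on the count for n = 3.
import itertools as _it
assert len(list(_it.permutations([1, 2, 3]))) == 6  # 3! = 6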
| 710
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :torch.FloatTensor
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict=3 , __a : Any=3 , __a : Union[str, Any]=("DownEncoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Tuple=32 , __a : int="silu" , __a : str=True , ) -> Dict:
super().__init__()
_UpperCamelCase : List[str] = layers_per_block
_UpperCamelCase : Dict = torch.nn.Convad(
__a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Any = nn.ModuleList([] )
# down
_UpperCamelCase : List[str] = block_out_channels[0]
for i, down_block_type in enumerate(__a ):
_UpperCamelCase : Tuple = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : int = i == len(__a ) - 1
_UpperCamelCase : Dict = get_down_block(
__a , num_layers=self.layers_per_block , in_channels=__a , out_channels=__a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , )
self.down_blocks.append(__a )
# mid
_UpperCamelCase : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# out
_UpperCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : Any = nn.SiLU()
_UpperCamelCase : Union[str, Any] = 2 * out_channels if double_z else out_channels
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[-1] , __a , 3 , padding=1 )
_UpperCamelCase : Optional[int] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Dict ) -> List[str]:
_UpperCamelCase : int = x
_UpperCamelCase : Optional[int] = self.conv_in(__a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Tuple ):
def custom_forward(*__a : Any ):
return module(*__a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , use_reentrant=__a )
# middle
_UpperCamelCase : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , use_reentrant=__a )
else:
for down_block in self.down_blocks:
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a )
# middle
_UpperCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __a )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : int = down_block(__a )
# middle
_UpperCamelCase : int = self.mid_block(__a )
# post-process
_UpperCamelCase : Any = self.conv_norm_out(__a )
_UpperCamelCase : Any = self.conv_act(__a )
_UpperCamelCase : Optional[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : int=3 , __a : Any=3 , __a : str=("UpDecoderBlock2D",) , __a : Optional[int]=(64,) , __a : int=2 , __a : Optional[int]=32 , __a : Tuple="silu" , __a : Union[str, Any]="group" , ) -> str:
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Tuple = nn.Convad(
__a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : List[Any] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__a , temb_channels=__a , )
# up
_UpperCamelCase : List[str] = list(reversed(__a ) )
_UpperCamelCase : int = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__a ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : Union[str, Any] = reversed_block_out_channels[i]
_UpperCamelCase : Optional[Any] = i == len(__a ) - 1
_UpperCamelCase : Union[str, Any] = get_up_block(
__a , num_layers=self.layers_per_block + 1 , in_channels=__a , out_channels=__a , prev_output_channel=__a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__a , resnet_groups=__a , attention_head_dim=__a , temb_channels=__a , resnet_time_scale_shift=__a , )
self.up_blocks.append(__a )
_UpperCamelCase : Optional[Any] = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] , __a )
else:
_UpperCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__a , eps=1e-6 )
_UpperCamelCase : str = nn.SiLU()
_UpperCamelCase : str = nn.Convad(block_out_channels[0] , __a , 3 , padding=1 )
_UpperCamelCase : Dict = False
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : List[Any] , __a : Union[str, Any]=None ) -> Tuple:
_UpperCamelCase : List[str] = z
_UpperCamelCase : Dict = self.conv_in(__a )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__a : Any ):
def custom_forward(*__a : Tuple ):
return module(*__a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a , use_reentrant=__a )
_UpperCamelCase : Optional[int] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(__a ) , __a , __a , use_reentrant=__a )
else:
# middle
_UpperCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __a , __a )
_UpperCamelCase : Union[str, Any] = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = torch.utils.checkpoint.checkpoint(create_custom_forward(__a ) , __a , __a )
else:
# middle
_UpperCamelCase : str = self.mid_block(__a , __a )
_UpperCamelCase : int = sample.to(__a )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Any = up_block(__a , __a )
# post-process
if latent_embeds is None:
_UpperCamelCase : List[str] = self.conv_norm_out(__a )
else:
_UpperCamelCase : Optional[int] = self.conv_norm_out(__a , __a )
_UpperCamelCase : Tuple = self.conv_act(__a )
_UpperCamelCase : List[Any] = self.conv_out(__a )
return sample
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Tuple , __a : List[str] , __a : List[str] , __a : str=None , __a : Optional[int]="random" , __a : Any=False , __a : Optional[Any]=True ) -> List[Any]:
super().__init__()
_UpperCamelCase : Tuple = n_e
_UpperCamelCase : Tuple = vq_embed_dim
_UpperCamelCase : Union[str, Any] = beta
_UpperCamelCase : str = legacy
_UpperCamelCase : Dict = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : Any = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : Dict = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Optional[int] = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Union[str, Any] = n_e
_UpperCamelCase : List[str] = sane_index_shape
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : Optional[Any] ) -> Optional[int]:
_UpperCamelCase : str = inds.shape
assert len(__a ) > 1
_UpperCamelCase : Union[str, Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(__a )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : Any = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Dict = self.unknown_index
return new.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Optional[int] ) -> Optional[int]:
_UpperCamelCase : int = inds.shape
assert len(__a ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[int] = self.used.to(__a )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Union[str, Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __a )
return back.reshape(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : str ) -> Optional[int]:
# reshape z -> (batch, height, width, channel) and flatten
_UpperCamelCase : List[str] = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[int] = torch.argmin(torch.cdist(__a , self.embedding.weight ) , dim=1 )
_UpperCamelCase : int = self.embedding(__a ).view(z.shape )
_UpperCamelCase : str = None
_UpperCamelCase : Any = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : List[str] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : Optional[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : Dict = self.remap_to_used(__a )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[str] , __a : str ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_UpperCamelCase : str = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(__a )
_UpperCamelCase : int = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Optional[int] = self.embedding(__a )
if shape is not None:
_UpperCamelCase : Tuple = z_q.view(__a )
# reshape back to match original input shape
_UpperCamelCase : Tuple = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __a : List[str] , __a : Optional[Any]=False ) -> int:
_UpperCamelCase : Dict = parameters
_UpperCamelCase, _UpperCamelCase : str = torch.chunk(__a , 2 , dim=1 )
_UpperCamelCase : Tuple = torch.clamp(self.logvar , -30.0 , 20.0 )
_UpperCamelCase : Union[str, Any] = deterministic
_UpperCamelCase : Dict = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Any = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_UpperCamelCase : List[Any] = randn_tensor(
self.mean.shape , generator=__a , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : List[Any] = self.mean + self.std * sample
return x
def __SCREAMING_SNAKE_CASE ( self : Any , __a : List[str]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Tuple , __a : List[str]=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
return self.mean
| 51
| 0
|