import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (the k bias is not trained, hence all zeros)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Name of the model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
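# Example invocation (a sketch: the script filename and output path below are
# placeholder assumptions, only the flags come from the argparse block above):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub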
import gc
import unittest

import torch

from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
"""simple docstring"""
from maths.prime_factors import prime_factors
def _lowerCamelCase( a ):
if not isinstance(_a , _a ):
__a = F"Input value of [number={number}] must be an integer"
raise TypeError(_a )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(_a ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
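# Quick sanity sketch (assuming maths.prime_factors.prime_factors returns the
# prime factors with multiplicity, as its name suggests):
#   mobius(6)   # factors [2, 3], even count  -> 1
#   mobius(30)  # factors [2, 3, 5], odd count -> -1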
"""simple docstring"""
def _lowerCamelCase( a ):
__a = len(a )
for i in range(1 , a ):
__a = collection[i]
__a = 0
__a = i - 1
while low <= high:
__a = (low + high) // 2
if val < collection[mid]:
__a = mid - 1
else:
__a = mid + 1
for j in range(a , a , -1 ):
__a = collection[j - 1]
__a = val
return collection
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:str = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__:Any = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
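# Usage sketch (any list of comparable items works the same way):
#   >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
#   [1, 2, 3, 4, 5, 6]
# The binary search narrows the insertion point in O(log i) comparisons, but the
# shift loop still moves up to i elements, so the worst case remains O(n^2).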
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int =VOCAB_FILES_NAMES
a : Tuple =PRETRAINED_VOCAB_FILES_MAP
a : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] =["input_ids", "attention_mask"]
a : int =RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
lowerCAmelCase : Optional[int] = getattr(snake_case__ , pre_tok_state.pop("type" ) )
lowerCAmelCase : Union[str, Any] = add_prefix_space
lowerCAmelCase : Union[str, Any] = pre_tok_class(**snake_case__ )
lowerCAmelCase : int = add_prefix_space
lowerCAmelCase : Union[str, Any] = "post_processor"
lowerCAmelCase : List[str] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
lowerCAmelCase : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase : Tuple = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase : List[Any] = tuple(state["cls"] )
lowerCAmelCase : int = False
if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
lowerCAmelCase : Union[str, Any] = add_prefix_space
lowerCAmelCase : List[Any] = True
if state.get("trim_offsets" , snake_case__ ) != trim_offsets:
lowerCAmelCase : List[str] = trim_offsets
lowerCAmelCase : Optional[int] = True
if changes_to_apply:
lowerCAmelCase : Union[str, Any] = getattr(snake_case__ , state.pop("type" ) )
lowerCAmelCase : Optional[Any] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
lowerCAmelCase : Optional[Any] = value
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[Any] = [self.sep_token_id]
lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
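# Usage sketch (checkpoint name is illustrative; any RoBERTa checkpoint works):
#   from transformers import RobertaTokenizerFast
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   tokenizer("Hello world")["input_ids"]  # should give [0, 31414, 232, 2]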
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
lowerCAmelCase : Tuple = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCAmelCase : List[Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
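# Worked example (figures are made up): borrowing 25000 at a 12% yearly rate
# over 3 years gives rate_per_month = 0.01 and 36 payments, so
# equated_monthly_installments(25000, 0.12, 3) comes out to roughly 830.36.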
"""simple docstring"""
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
a__ : Tuple = 1
@register_to_config
def __init__( self : str , _lowercase : Union[str, Any]=20_00 , _lowercase : Union[str, Any]=0.1 , _lowercase : Tuple=20 , _lowercase : str=1E-3 ):
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
def a ( self : Dict , _lowercase : int , _lowercase : Union[str, torch.device] = None ):
__UpperCAmelCase = torch.linspace(1 , self.config.sampling_eps , _lowercase , device=_lowercase )
def a ( self : List[str] , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : str , _lowercase : Any=None ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__UpperCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__UpperCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__UpperCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
__UpperCAmelCase = std.unsqueeze(-1 )
__UpperCAmelCase = -score / std
# compute
__UpperCAmelCase = -1.0 / len(self.timesteps )
__UpperCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__UpperCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__UpperCAmelCase = beta_t.unsqueeze(-1 )
__UpperCAmelCase = -0.5 * beta_t * x
__UpperCAmelCase = torch.sqrt(_lowercase )
__UpperCAmelCase = drift - diffusion**2 * score
__UpperCAmelCase = x + drift * dt
# add noise
__UpperCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=_lowercase , device=x.device , dtype=x.dtype )
__UpperCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : int ):
return self.config.num_train_timesteps
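# Note: step_pred above is an Euler-Maruyama discretization of the reverse-time
# variance-preserving SDE. With beta_t = beta_min + t * (beta_max - beta_min),
# the drift is -0.5 * beta_t * x - beta_t * score, the diffusion coefficient is
# sqrt(beta_t), and the injected noise is scaled by sqrt(-dt) since dt < 0.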
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowercase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _UpperCAmelCase ( _lowerCAmelCase ):
def a ( self : List[Any] , _lowercase : GenericTensor ):
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def a ( self : List[str] , _lowercase : GenericTensor ):
__UpperCAmelCase = self.get_masked_index(_lowercase )
__UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def a ( self : Optional[int] , _lowercase : GenericTensor ):
if isinstance(_lowercase , _lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowercase )
def a ( self : List[str] , _lowercase : Optional[int] , _lowercase : Tuple=None , **_lowercase : Tuple ):
if return_tensors is None:
__UpperCAmelCase = self.framework
__UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase )
self.ensure_exactly_one_mask_token(_lowercase )
return model_inputs
def a ( self : Optional[int] , _lowercase : Tuple ):
__UpperCAmelCase = self.model(**_lowercase )
__UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : Optional[Any]=5 , _lowercase : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase = target_ids.shape[0]
__UpperCAmelCase = model_outputs['''input_ids'''][0]
__UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCAmelCase = outputs.numpy()
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = stable_softmax(_lowercase , axis=-1 )
if target_ids is not None:
__UpperCAmelCase = tf.gather_nd(tf.squeeze(_lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCAmelCase = tf.expand_dims(_lowercase , 0 )
__UpperCAmelCase = tf.math.top_k(_lowercase , k=_lowercase )
__UpperCAmelCase , __UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCAmelCase = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(_lowercase )
__UpperCAmelCase = []
__UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase = target_ids[p].tolist()
__UpperCAmelCase = p
# Filter padding out:
__UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_lowercase )
result.append(_lowercase )
if single_mask:
return result[0]
return result
def a ( self : str , _lowercase : List[Any] , _lowercase : List[Any]=None ):
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = [targets]
try:
__UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase = {}
__UpperCAmelCase = []
for target in targets:
__UpperCAmelCase = vocab.get(_lowercase , _lowercase )
if id_ is None:
__UpperCAmelCase = self.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , max_length=1 , truncation=_lowercase , )['''input_ids''']
if len(_lowercase ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__UpperCAmelCase = list(set(_lowercase ) )
if len(_lowercase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__UpperCAmelCase = np.array(_lowercase )
return target_ids
def a ( self : int , _lowercase : Dict=None , _lowercase : Optional[Any]=None ):
__UpperCAmelCase = {}
if targets is not None:
__UpperCAmelCase = self.get_target_ids(_lowercase , _lowercase )
__UpperCAmelCase = target_ids
if top_k is not None:
__UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : Union[str, Any] , _lowercase : Optional[Any] , *_lowercase : Union[str, Any] , **_lowercase : int ):
__UpperCAmelCase = super().__call__(_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
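# Usage sketch (model name is illustrative; any fill-mask checkpoint works):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2)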
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
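# Usage sketch (column names are hypothetical): aligning the template with a
# dataset's features makes the "labels" schema pick up that dataset's own
# ClassLabel definition.
#   from datasets import Audio, ClassLabel, Features
#   features = Features({"audio": Audio(), "genre": ClassLabel(names=["rock", "jazz"])})
#   template = AudioClassification(audio_column="audio", label_column="genre")
#   template = template.align_with_features(features)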
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
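# The expression above is the Friedmann equation written with density
# parameters: H(z) = H0 * sqrt(Omega_r * (1+z)^4 + Omega_m * (1+z)^3
# + Omega_k * (1+z)^2 + Omega_Lambda), where the curvature term is
# Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda).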
from __future__ import annotations

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available

from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester


if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }


@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=30 , _UpperCamelCase=4_00 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=True , _UpperCamelCase=1 / 2_55 , _UpperCamelCase=True , ):
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean
lowerCAmelCase__ = image_std
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_pad
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
if not batched:
lowerCAmelCase__ = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
lowerCAmelCase__ , lowerCAmelCase__ = image.size
else:
lowerCAmelCase__ , lowerCAmelCase__ = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ = int(self.size['shortest_edge'] * h / w )
lowerCAmelCase__ = self.size['shortest_edge']
elif w > h:
lowerCAmelCase__ = self.size['shortest_edge']
lowerCAmelCase__ = int(self.size['shortest_edge'] * w / h )
else:
lowerCAmelCase__ = self.size['shortest_edge']
lowerCAmelCase__ = self.size['shortest_edge']
else:
lowerCAmelCase__ = []
for image in image_inputs:
lowerCAmelCase__ , lowerCAmelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
lowerCAmelCase__ = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
_SCREAMING_SNAKE_CASE : Dict = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = DeformableDetrImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 122
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
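# Example invocation (hypothetical script name, checkpoints shown only for illustration):
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2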
| 98
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    # Return the optimal value for the player to move in a perfect binary game tree.
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
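# For the scores above the leaves sit at depth 3 and the maximizer moves first:
# max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
# = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65,
# so the script prints "Optimal value : 65".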
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 284
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
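    # The three properties above build deliberately tiny model components so the
    # pipeline tests below can run on CPU in seconds; the fixed manual_seed(0)
    # keeps their randomly initialized weights reproducible across runs.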
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 351
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
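# Minimal usage sketch (hedged: an assumption of how this config is typically consumed,
# not part of this file):
#   from transformers import CvtConfig, CvtModel
#   config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])
#   model = CvtModel(config)  # a three-stage convolutional vision transformer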
| 341
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 70
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each frontier chases the other frontier's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
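# Note: the two frontiers above advance in lockstep (one node popped from each
# queue per iteration) and meet when a forward node and a backward node share a
# position; the backward half is reversed and its duplicate meeting cell dropped
# before the two halves are joined into a single path.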
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 268
| 0
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 314
|
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
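    # With identical scheduler configs, DDPMScheduler.add_noise and DDIMScheduler.add_noise
    # apply the same forward-diffusion formula, so the noisy batches and the final noise
    # predictions above are expected to agree to within the 1e-5 tolerance.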
| 314
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
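# The pattern above defers heavy framework imports: at runtime the module object is
# replaced by a _LazyModule that resolves names from _import_structure on first
# attribute access, while the TYPE_CHECKING branch keeps static type checkers happy.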
| 90
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
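# Each "*" in the target names above stands for a transformer layer index; it is
# filled in at conversion time from the layer number parsed out of the fairseq
# parameter name (see recursively_load_weights below).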
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
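# A hedged usage sketch (not part of the original script; the checkpoint and
# output paths below are hypothetical placeholders):
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted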
| 86
| 0
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)
    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 362
|
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 157
| 0
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the end candidate of an existing subsequence
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
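# A minimal usage sketch (added for illustration, not in the original module):
if __name__ == "__main__":
    sample = [2, 5, 3, 7, 11, 8, 10, 13, 6]
    # one longest increasing subsequence is 2, 3, 7, 8, 10, 13
    assert longest_increasing_subsequence_length(sample) == 6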
| 73
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=3,
        sample_size=32,
        num_vq_embeddings=256,
        norm_num_groups=32,
        vq_embed_dim=None,
        scaling_factor=0.18215,
        norm_type="group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )
    @apply_forward_hook
    def encode(self, x, return_dict=True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)
    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample, return_dict=True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
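# A minimal smoke-test sketch (illustrative; this module uses relative imports,
# so instantiate the model from within the package rather than running the file directly):
#
#   model = VQModel()
#   image = torch.randn(1, 3, 32, 32)
#   reconstruction = model(image).sample  # same shape as the input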
| 122
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
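# Usage sketch (illustrative): the defaults reproduce the ViT-MAE base layout.
#
#   config = ViTMAEConfig(mask_ratio=0.6)
#   assert config.hidden_size == 768 and config.mask_ratio == 0.6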
| 183
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
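# Illustrative only (actual results depend on the bundled words.txt):
#   anagram("pots") could return a list such as ["opts", "post", "pots", "spot", "stop", "tops"]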
| 183
| 1
|
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
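# Minimal usage examples (illustrative):
#   main("7")  -> "0b111"
#   main("-2") -> "-0b10"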
| 308
|
'''simple docstring'''
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
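# A round-trip sanity sketch (added for illustration, not in the original module):
if __name__ == "__main__":
    payload = b"Hello, World!"
    assert base64_encode(payload) == b"SGVsbG8sIFdvcmxkIQ=="
    assert base64_decode(b"SGVsbG8sIFdvcmxkIQ==") == payload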
| 341
| 0
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pad mel-bin targets with the feature extractor
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
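# A hedged usage sketch (illustrative; assumes the checkpoint below is available on the Hub):
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")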
| 368
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 2
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 314
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 314
| 1
|
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 210
|
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
    def __str__(self):
        return str(self.k)
    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 210
| 1
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 1_10_08,
'13B': 1_38_24,
'30B': 1_79_20,
'65B': 2_20_16,
'70B': 2_86_72,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def read_json(path):
    with open(path, 'r') as f:
        return json.load(f)
def write_json(text, path):
    with open(path, 'w') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, 'tmp')
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, 'params.json'))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''')
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, 'consolidated.00.pth'), map_location='cpu')
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, F'''consolidated.{i:02d}.pth'''), map_location='cpu')
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            state_dict = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    F'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    F'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            state_dict[F'''model.layers.{layer_i}.self_attn.q_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[F'''model.layers.{layer_i}.self_attn.k_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[F'''model.layers.{layer_i}.self_attn.v_proj.weight'''] = torch.cat(
                [
                    loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[F'''model.layers.{layer_i}.self_attn.o_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(num_shards)], dim=1
            )
            state_dict[F'''model.layers.{layer_i}.mlp.gate_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(num_shards)], dim=0
            )
            state_dict[F'''model.layers.{layer_i}.mlp.down_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(num_shards)], dim=1
            )
            state_dict[F'''model.layers.{layer_i}.mlp.up_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(num_shards)], dim=0
            )
        state_dict[F'''model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'''] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCAmelCase = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
__UpperCAmelCase = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(snake_case__ )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]['''output.weight'''] for i in range(snake_case__ )] , dim=0 ),
}
for k, v in state_dict.items():
__UpperCAmelCase = filename
param_count += v.numel()
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
# Write configs
__UpperCAmelCase = {"total_size": param_count * 2}
write_json(snake_case__ , os.path.join(snake_case__ , '''pytorch_model.bin.index.json''' ) )
__UpperCAmelCase = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
__UpperCAmelCase = params["multiple_of"] if "multiple_of" in params else 256
__UpperCAmelCase = LlamaConfig(
hidden_size=snake_case__ , intermediate_size=compute_intermediate_size(snake_case__ , snake_case__ , snake_case__ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=snake_case__ , )
config.save_pretrained(snake_case__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
__UpperCAmelCase = LlamaForCausalLM.from_pretrained(snake_case__ , torch_dtype=torch.floataa , low_cpu_mem_usage=snake_case__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(snake_case__ , safe_serialization=snake_case__ )
shutil.rmtree(snake_case__ )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 332
|
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
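# Minimal usage examples (illustrative):
#   join("-", ["a", "b", "c"])           -> "a-b-c"
#   join(" ", ["You", "are", "amazing"]) -> "You are amazing"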
| 157
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''torchsde'''])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''torchsde'''])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''torchsde'''])
| 368
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
| 0
|
"""simple docstring"""
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 183
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])], )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 183
| 1
|
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
lowerCAmelCase_ = 5
lowerCAmelCase_ = ['A', 'B', 'C', 'D', 'E']
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 361
|
"""simple docstring"""
from __future__ import annotations
END = '#'
class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}
    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)
    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie('de'))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
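# Editor's addition, not part of the original row: a few more probes of the
# prefix search above. _elements maps the END marker to a single space, so an
# exact word match shows up with a trailing space.
print(autocomplete_using_trie("dep"))  # ('depart ',)
print(autocomplete_using_trie("dog"))  # ('dog ',)
print(autocomplete_using_trie("xyz"))  # ()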
| 302
| 0
|
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, handling subtractive pairs like IV."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral, greedily taking the largest value."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
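# Editor's addition, not part of the original row: a roundtrip sanity check over
# a handful of values, including subtractive forms such as 1994 -> "MCMXCIV".
for n in (1, 4, 9, 14, 40, 90, 444, 1994, 3999):
    assert roman_to_int(int_to_roman(n)) == n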
| 129
|
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """
    Return True if every (hashable) element of the collection appears only once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique(["a", "b", "a"])
    False
    """
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
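# Editor's addition, not part of the original row: the set() trick above needs
# hashable elements. A hedged O(n^2) fallback for lists of unhashable items:
def all_unique_unhashable(collection: list) -> bool:
    return all(
        collection[i] != collection[j]
        for i in range(len(collection))
        for j in range(i + 1, len(collection))
    )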
| 2
| 0
|
"""simple docstring"""
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test: returns True iff the Mersenne number 2**p - 1 is prime.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
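# Editor's addition, not part of the original row: scanning small odd primes p
# recovers the classical Mersenne exponents; within this range, 2**p - 1 is
# prime exactly for p in {3, 5, 7, 13, 17, 19, 31}.
mersenne_exponents = [p for p in (3, 5, 7, 11, 13, 17, 19, 23, 29, 31) if lucas_lehmer_test(p)]
print(mersenne_exponents)  # [3, 5, 7, 13, 17, 19, 31]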
| 202
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
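# Editor's addition, not part of the original row: a minimal sketch of composing
# the two sub-configs through the public transformers API (assuming a recent
# transformers release that exports these classes).
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text = OwlViTTextConfig(hidden_size=512)
vision = OwlViTVisionConfig(hidden_size=768, patch_size=32)
# from_text_vision_configs takes plain dicts here, matching the file above.
config = OwlViTConfig.from_text_vision_configs(text.to_dict(), vision.to_dict())
assert config.text_config.hidden_size == 512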
| 202
| 1
|
from manim import *
class Stage5(Scene):
    # NOTE: the Scene subclass name is reconstructed and may differ from the
    # source; manim requires the entry point to be called construct().
    def construct(self):
__lowercase = Rectangle(height=0.5 , width=0.5 )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase = Rectangle(height=0.25 , width=0.25 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''CPU''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''GPU''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''Model''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
__lowercase = []
__lowercase = []
for i, rect in enumerate(lowerCAmelCase__ ):
__lowercase = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.8 )
target.move_to(lowerCAmelCase__ )
model_arr.append(lowerCAmelCase__ )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase = Text('''Disk''' , font_size=24 )
__lowercase = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
__lowercase = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) )
__lowercase = Square(0.3 )
input.set_fill(lowerCAmelCase__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCAmelCase__ , buff=0.5 )
self.play(Write(lowerCAmelCase__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCAmelCase__ , buff=0.02 )
self.play(MoveToTarget(lowerCAmelCase__ ) )
self.play(FadeOut(lowerCAmelCase__ ) )
__lowercase = Arrow(start=lowerCAmelCase__ , end=lowerCAmelCase__ , color=lowerCAmelCase__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCAmelCase__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__lowercase = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
__lowercase = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(lowerCAmelCase__ ) , Circumscribe(model_arr[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(model_cpu_arr[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCAmelCase__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__lowercase = AnimationGroup(
FadeOut(lowerCAmelCase__ , run_time=0.5 ) , MoveToTarget(lowerCAmelCase__ , run_time=0.5 ) , FadeIn(lowerCAmelCase__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCAmelCase__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__lowercase = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[i] , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(model_arr[i + 1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__lowercase = a_c
__lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCAmelCase__ ) , FadeOut(lowerCAmelCase__ , run_time=0.5 ) , )
__lowercase = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , MoveToTarget(lowerCAmelCase__ ) )
self.wait()
| 210
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                f"is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} "
                f"(at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
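# Editor's addition, not part of the original row: these helpers back the public
# `datasets` entry points. A minimal standalone sketch with toy data, assuming a
# datasets 2.x release:
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11, 12, 13]})
mixed = interleave_datasets(
    [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
)
joined = concatenate_datasets([d1, d2])
assert len(joined) == 7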
| 210
| 1
|
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
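# Editor's addition, not part of the original row: the candidate sequence walks
# the gaps between consecutive cubes, since (k + 1)**3 - k**3 == 3*k*k + 3*k + 1
# and successive gaps grow by 6*(k + 1). A quick consistency check:
candidate, k = 7, 1
for _ in range(100):
    assert candidate == (k + 1) ** 3 - k**3
    k += 1
    candidate += 6 * k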
| 362
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
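# Editor's addition, not part of the original row: a minimal sketch instantiating
# the config through the public API and checking the derived attributes set above
# (assuming a transformers release that exports MaskFormerSwinConfig).
from transformers import MaskFormerSwinConfig

cfg = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
assert cfg.hidden_size == 96 * 2**3  # channel dim after the last stage
assert cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]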
| 172
| 0
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=14 , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : int=True , __lowerCamelCase : Any=99 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : List[Any]=0.02 , ):
UpperCamelCase :Tuple = parent
UpperCamelCase :Optional[Any] = batch_size
UpperCamelCase :List[Any] = seq_length
UpperCamelCase :Union[str, Any] = is_training
UpperCamelCase :Tuple = use_input_mask
UpperCamelCase :List[str] = use_labels
UpperCamelCase :Union[str, Any] = vocab_size
UpperCamelCase :Tuple = d_model
UpperCamelCase :Any = num_hidden_layers
UpperCamelCase :Union[str, Any] = num_attention_heads
UpperCamelCase :Optional[Any] = ffn_dim
UpperCamelCase :List[Any] = activation_function
UpperCamelCase :str = activation_dropout
UpperCamelCase :List[str] = attention_dropout
UpperCamelCase :List[str] = max_position_embeddings
UpperCamelCase :Dict = initializer_range
UpperCamelCase :List[Any] = None
UpperCamelCase :str = 0
UpperCamelCase :Any = 2
UpperCamelCase :Any = 1
def _A ( self : int ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _A ( self : List[str] ):
UpperCamelCase :Union[str, Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase :List[str] = None
if self.use_input_mask:
UpperCamelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase :str = self.get_config()
UpperCamelCase :List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _A ( self : str ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__lowerCamelCase , )
def _A ( self : Union[str, Any] ):
UpperCamelCase :Optional[Any] = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) :Optional[Any] = config_and_inputs
UpperCamelCase :str = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
snake_case__ : List[str] = False
snake_case__ : Optional[int] = False
snake_case__ : List[str] = False
def _A ( self : List[Any] ):
UpperCamelCase :Optional[Any] = TFXGLMModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 )
def _A ( self : int ):
self.config_tester.run_common_tests()
@slow
def _A ( self : int ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase :str = TFXGLMModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _A ( self : Dict ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def _A ( self : Optional[Any] , __lowerCamelCase : Optional[int]=True ):
UpperCamelCase :Dict = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        UpperCamelCase :str = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase :int = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
UpperCamelCase :str = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase )
@slow
def _A ( self : Tuple ):
UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
UpperCamelCase :Optional[int] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
UpperCamelCase :str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
UpperCamelCase :Any = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , seed=[7, 0] )
UpperCamelCase :str = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def _A ( self : int ):
UpperCamelCase :List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
UpperCamelCase :str = """left"""
# use different length sentences to test batching
UpperCamelCase :Union[str, Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
UpperCamelCase :str = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase )
UpperCamelCase :Optional[int] = inputs["""input_ids"""]
UpperCamelCase :Dict = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
UpperCamelCase :List[str] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
UpperCamelCase :List[Any] = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase :str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
UpperCamelCase :str = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 )
UpperCamelCase :Any = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCamelCase :List[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
| 38
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 86
| 0
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit in the |0> state and return the counts histogram."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
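# Editor's addition, not part of the original row: the same legacy qiskit.execute
# flow, but with a Hadamard gate first, so the measured counts split roughly
# 50/50 between "0" and "1" instead of collapsing to all "0".
def superposition_measure(shots: int = 1000):
    simulator = qiskit.Aer.get_backend('aer_simulator')
    circuit = qiskit.QuantumCircuit(1, 1)
    circuit.h(0)  # |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)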
| 73
|
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) for num > 0 by numerical integration of the defining integral."""
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
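# Editor's addition, not part of the original row: spot checks against known
# values. Gamma(n) == (n - 1)! for positive integers and Gamma(1/2) == sqrt(pi).
assert abs(gamma(5) - 24.0) < 1e-4
assert abs(gamma(0.5) - math.sqrt(math.pi)) < 1e-4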
| 73
| 1
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
UpperCamelCase : List[Any] = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 100
@property
def UpperCAmelCase_ ( self ):
__A : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : List[str] = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__A : Dict = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__A : str = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__A : int = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase_ ( self ):
__A : List[str] = self.dummy_prior
__A : Optional[int] = self.dummy_image_encoder
__A : Optional[Any] = self.dummy_text_encoder
__A : List[str] = self.dummy_tokenizer
__A : int = self.dummy_image_processor
__A : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__lowercase , clip_sample_range=1_0.0 , )
__A : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def UpperCAmelCase_ ( self , _A , _A=0 ):
if str(__lowercase ).startswith('mps' ):
__A : List[str] = torch.manual_seed(__lowercase )
else:
__A : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__A : Any = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def UpperCAmelCase_ ( self ):
__A : int = 'cpu'
__A : int = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**__lowercase )
__A : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__A : Any = pipe(**self.get_dummy_inputs(__lowercase ) )
__A : Optional[Any] = output.image_embeds
__A : Union[str, Any] = pipe(
**self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0]
__A : Tuple = image[0, -10:]
__A : List[str] = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__A : Union[str, Any] = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase_ ( self ):
__A : int = torch_device == 'cpu'
__A : Tuple = True
__A : Tuple = False
self._test_inference_batch_single_identical(
test_max_difference=__lowercase , relax_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
@skip_mps
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = torch_device == 'cpu'
__A : Optional[int] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowercase , test_mean_pixel_difference=__lowercase , )
| 280
|
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal appending node values to res."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort arr by building a binary search tree and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
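# Editor's note, not part of the original row: insert() overwrites val on ties
# instead of descending, so duplicates are silently dropped and tree_sort also
# deduplicates. Average cost is O(n log n); already-sorted input degrades the
# unbalanced BST to O(n^2).
assert tree_sort([3, 1, 3, 2]) == [1, 2, 3]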
| 302
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
def __init__( self : Optional[int] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Optional[Any] = 3 , _lowercase : Dict = 1 , _lowercase : int = "relu" ):
super().__init__()
        __UpperCAmelCase = nn.Conv2d(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , padding=kernel_size // 2 , bias=SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = nn.BatchNorm2d(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def a ( self : str , _lowercase : str ):
__UpperCAmelCase = self.convolution(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.normalization(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class ResNetEmbeddings(nn.Module):
def __init__( self : Optional[int] , _lowercase : int ):
super().__init__()
__UpperCAmelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        __UpperCAmelCase = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
__UpperCAmelCase = config.num_channels
def a ( self : List[Any] , _lowercase : int ):
__UpperCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
__UpperCAmelCase = self.embedder(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.pooler(SCREAMING_SNAKE_CASE_ )
return embedding
class ResNetShortCut(nn.Module):
def __init__( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Optional[int] = 2 ):
super().__init__()
        __UpperCAmelCase = nn.Conv2d(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = nn.BatchNorm2d(SCREAMING_SNAKE_CASE_ )
def a ( self : List[str] , _lowercase : Optional[int] ):
__UpperCAmelCase = self.convolution(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.normalization(SCREAMING_SNAKE_CASE_ )
return hidden_state
class ResNetBasicLayer(nn.Module):
def __init__( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any] , _lowercase : List[str] = 1 , _lowercase : Tuple = "relu" ):
super().__init__()
__UpperCAmelCase = in_channels != out_channels or stride != 1
__UpperCAmelCase = (
ResNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , activation=SCREAMING_SNAKE_CASE_ ) , )
__UpperCAmelCase = ACTaFN[activation]
def a ( self : Dict , _lowercase : int ):
__UpperCAmelCase = hidden_state
__UpperCAmelCase = self.layer(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
__UpperCAmelCase = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
def __init__( self : int , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[int] = 1 , _lowercase : Any = "relu" , _lowercase : int = 4 ):
super().__init__()
__UpperCAmelCase = in_channels != out_channels or stride != 1
__UpperCAmelCase = out_channels // reduction
__UpperCAmelCase = (
ResNetShortCut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ) , ResNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ ) , )
__UpperCAmelCase = ACTaFN[activation]
def a ( self : Optional[Any] , _lowercase : Any ):
__UpperCAmelCase = hidden_state
__UpperCAmelCase = self.layer(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
__UpperCAmelCase = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class ResNetStage(nn.Module):
def __init__( self : Dict , _lowercase : List[str] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : List[Any] = 2 , _lowercase : Any = 2 , ):
super().__init__()
__UpperCAmelCase = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
__UpperCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) , *[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def a ( self : Union[str, Any] , _lowercase : Dict ):
__UpperCAmelCase = input
for layer in self.layers:
__UpperCAmelCase = layer(SCREAMING_SNAKE_CASE_ )
return hidden_state
class ResNetEncoder(nn.Module):
def __init__( self : Optional[int] , _lowercase : List[Any] ):
super().__init__()
__UpperCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ):
self.stages.append(ResNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ ) )
def a ( self : Tuple , _lowercase : Any , _lowercase : Optional[Any] = False , _lowercase : str = True ):
__UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase = hidden_states + (hidden_state,)
__UpperCAmelCase = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
__UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ , )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def a ( self : Union[str, Any] , _lowercase : Tuple ):
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a ( self : Optional[int] , _lowercase : List[Any] , _lowercase : Optional[int]=False ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCAmelCase = value
_lowercase : List[str] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_lowercase : Dict = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , a__ , )
class ResNetModel(ResNetPreTrainedModel):
def __init__( self : Any , _lowercase : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = config
__UpperCAmelCase = ResNetEmbeddings(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = ResNetEncoder(SCREAMING_SNAKE_CASE_ )
        __UpperCAmelCase = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self : List[str] , _lowercase : List[str] , _lowercase : Optional[int] = None , _lowercase : int = None ):
__UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase = self.embedder(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = encoder_outputs[0]
__UpperCAmelCase = self.pooler(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a__ , )
class _UpperCAmelCase ( a__ ):
def __init__( self : Dict , _lowercase : Tuple ):
super().__init__(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = config.num_labels
__UpperCAmelCase = ResNetModel(SCREAMING_SNAKE_CASE_ )
# classification head
__UpperCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self : Any , _lowercase : int = None , _lowercase : List[str] = None , _lowercase : Any = None , _lowercase : Optional[Any] = None , ):
__UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase = self.resnet(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase = self.classifier(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__UpperCAmelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__UpperCAmelCase = 'single_label_classification'
else:
__UpperCAmelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
__UpperCAmelCase = MSELoss()
if self.num_labels == 1:
__UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__UpperCAmelCase = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config.problem_type == "single_label_classification":
__UpperCAmelCase = CrossEntropyLoss()
__UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__UpperCAmelCase = BCEWithLogitsLoss()
__UpperCAmelCase = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
__UpperCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , a__ , )
class _UpperCAmelCase ( a__ , a__ ):
def __init__( self : Tuple , _lowercase : Dict ):
super().__init__(SCREAMING_SNAKE_CASE_ )
super()._init_backbone(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = [config.embedding_size] + config.hidden_sizes
__UpperCAmelCase = ResNetEmbeddings(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = ResNetEncoder(SCREAMING_SNAKE_CASE_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC )
def a ( self : str , _lowercase : List[str] , _lowercase : Optional[int] = None , _lowercase : Optional[Any] = None ):
__UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase = self.embedder(SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = self.encoder(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__UpperCAmelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=SCREAMING_SNAKE_CASE_ , )
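
# Usage sketch (added for illustration; not part of the upstream module). The
# "microsoft/resnet-50" checkpoint name and the image path are assumptions; any
# ResNet checkpoint on the Hub works the same way.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, ResNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    image = Image.open("cat.jpg")  # placeholder path
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])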
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
                         cls_token=cls_token, mask_token=mask_token,
                         tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        # Keep the backend normalizer in sync with the arguments passed here
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
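
# Usage sketch (added for illustration): encoding a sentence pair, assuming the
# "google/mobilebert-uncased" checkpoint.
if __name__ == "__main__":
    tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    encoded = tokenizer("How are you?", "I am fine.")
    print(encoded["input_ids"])       # [CLS] ... [SEP] ... [SEP]
    print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second
    print(tokenizer.decode(encoded["input_ids"]))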
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode characters used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
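
# Usage sketch (added for illustration): a byte-level BPE round trip with the
# tokenizer above. The "allenai/longformer-base-4096" checkpoint name is taken
# from the URL maps; from_pretrained fetches vocab.json and merges.txt.
if __name__ == "__main__":
    tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    encoded = tokenizer("Hello world")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(encoded))  # e.g. ['<s>', 'Hello', 'Ġworld', '</s>']
    print(tokenizer.decode(encoded, skip_special_tokens=True))  # Hello world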
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False,
                clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True,
                **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
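
# Usage sketch (added for illustration): SentencePiece tokenization with the
# BigBird tokenizer above, assuming the "google/bigbird-roberta-base" checkpoint.
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    encoded = tokenizer("Paris is the capital of France.")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(encoded))  # [CLS] ▁Paris ▁is ... [SEP]
    print(tokenizer.decode(encoded, skip_special_tokens=True))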
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon search results for `product` into a pandas DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
        (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out price columns that could not be parsed from the page
        data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
        data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02,
                 layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0,
                 activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
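
# Usage sketch (added for illustration): a config is a plain container, so a
# small randomly initialized model can be built from it for smoke tests. The
# BioGptForCausalLM import assumes an installed transformers package.
if __name__ == "__main__":
    from transformers import BioGptForCausalLM

    config = BioGptConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
    model = BioGptForCausalLM(config)  # random weights
    print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")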
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False,
                 bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
                 pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
                 additional_special_tokens=["<eop>", "<eod>"],
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False,
                clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True,
                **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
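
# Usage sketch (added for illustration): note XLNet's convention of appending
# <sep> <cls> at the end of the sequence instead of prepending a [CLS] token.
# Assumes the "xlnet-base-cased" checkpoint.
if __name__ == "__main__":
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    encoded = tokenizer("Hello world")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(encoded))  # [..., '<sep>', '<cls>']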
def encrypt(input_string: str, key: int) -> str:
    """Encrypt `input_string` with the rail-fence (zigzag) cipher of height `key`."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recover the plaintext by rebuilding the zigzag grid and reading it in order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Run decrypt() with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
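
# Worked example (added for illustration): with key=4 the zigzag rows of
# "Hello World" are "HW", "e o", "lord" and "ll", so encrypt() returns their
# concatenation and decrypt() inverts it.
if __name__ == "__main__":
    ciphertext = encrypt("Hello World", 4)
    print(ciphertext)  # HWe olordll
    assert decrypt(ciphertext, 4) == "Hello World"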
'''Divide-and-conquer maximum contiguous subarray, with a simple runtime benchmark.'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Return the bounds and sum of the maximum contiguous subarray of arr[low:high + 1]."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray crossing the midpoint: best left arm plus best right arm."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
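
# Worked example (added for illustration): for the classic test array below the
# maximum-sum contiguous subarray is arr[3:7] == [4, -1, 2, 1] with sum 6.
if __name__ == "__main__":
    example = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    low, high, best = max_subarray(example, 0, len(example) - 1)
    print(low, high, best)  # 3 6 6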
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
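
# Usage sketch (added for illustration): thanks to the _LazyModule indirection
# above, importing one symbol only loads the submodule that defines it, e.g.
#
#     from transformers.models.blenderbot_small import BlenderbotSmallConfig
#
# pulls in configuration_blenderbot_small on first access, while the heavy
# torch/tf/flax modeling files stay unimported until one of their classes is used.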
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: n rows of left-padded stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond by mirroring floyd()."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
a =1
while K:
a =int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
a =int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
'''Utilities for loading and quantizing models with bitsandbytes (4-bit / 8-bit).'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_UpperCamelCase = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """
    Quantize a model with bitsandbytes and load its weights, optionally offloading some modules.
    """
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model on the meta device
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if the tensor has not been quantized yet, quantize it first, then offload it and its statistics
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        # walk the dotted path to reach the module that owns the tensor
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
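# A minimal usage sketch for `load_and_quantize_model` (the model class and weights path
# below are placeholders, not part of this module; `BnbQuantizationConfig` and
# `init_empty_weights` come from accelerate):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#     from transformers import AutoConfig, AutoModelForCausalLM
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("facebook/opt-350m"))
#     model = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
#     )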
| 16
|
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """
    Calculate the turn around time of each process with Highest Response Ratio Next scheduling.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks whether each process has finished: 1 if finished, 0 otherwise.
    finished_process = [0] * no_of_process
    # List to hold the computed turn around times
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # If the earliest unfinished process has not arrived yet, advance the clock to its arrival.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time
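# Worked example of the response-ratio rule used above: at current_time = 6, a process with
# arrival_time = 0 and burst_time = 3 scores (3 + (6 - 0)) / 3 = 3.0, while one with
# arrival_time = 5 and burst_time = 4 scores (4 + (6 - 5)) / 4 = 1.25, so the first process
# (the one that has waited longer relative to its burst) is scheduled next.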
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """
    Calculate the waiting time of each process: waiting time = turn around time - burst time.
    """
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
_UpperCamelCase = 5
_UpperCamelCase = ['''A''', '''B''', '''C''', '''D''', '''E''']
_UpperCamelCase = [1, 2, 3, 4, 5]
_UpperCamelCase = [1, 2, 3, 4, 5]
_UpperCamelCase = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_UpperCamelCase = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
| 16
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Prepare a test image from the COCO fixtures."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
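# A minimal inference sketch outside the test harness (the processor-based flow is the
# standard transformers API; `apply_ocr=True` additionally requires pytesseract, and the
# image path is a placeholder):
#
#     from PIL import Image
#     from transformers import AutoProcessor, LayoutLMv3Model
#
#     processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
#     model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     outputs = model(**encoding)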
| 101
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
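# For example, with the default scale_factor of 8 (so scale_factor**2 == 64):
# get_new_h_w(768, 768) -> (96, 96), while get_new_h_w(700, 700) -> (88, 88),
# since 700 // 64 == 10 with a remainder, giving (10 + 1) * 8 = 88.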
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 86
| 0
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
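# A quick illustration of the semantics under test (values are illustrative):
#
#     datasets.config.IN_MEMORY_MAX_SIZE = 100 * 2**20  # 100 MiB
#     is_small_dataset(50 * 2**20)    # -> True: fits below the configured maximum
#     is_small_dataset(400 * 2**20)   # -> False: too large
#     is_small_dataset(None)          # -> False: unknown sizes are never "small"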
| 292
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars, desc='converting tf checkpoint to dict'):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
A_ : Optional[Any] = parser.parse_args()
if args.save_dir is None:
A_ : Union[str, Any] = Path(args.tf_ckpt_path).parent.name
A_ : Optional[Any] = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
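# Example invocation (the script filename is a placeholder; the dataset name is inferred
# from the checkpoint's parent directory, here "aeslc"):
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc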
| 292
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
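# Endpoints of the geometric schedule built in `set_timesteps`: for i == 0 the entry is
# sigma_max**2, and for i == num_inference_steps - 1 it collapses to
# sigma_max**2 * (sigma_min**2 / sigma_max**2) == sigma_min**2, so the entries interpolate
# geometrically between sigma_max**2 and sigma_min**2.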
| 228
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, 'return_all_scores') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.',
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
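# A minimal usage sketch through the standard transformers entry point (the checkpoint
# name is just an example):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     classifier("This movie was great!")              # -> [{'label': 'POSITIVE', 'score': ...}]
#     classifier("This movie was great!", top_k=None)  # scores for every label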
| 293
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset ):
        features: List[InputFeatures]
        def __init__( self , data_dir , tokenizer : PreTrainedTokenizer , task , max_seq_length = None , overwrite_cache=False , evaluate : bool = False , ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F"""Loading features from cached file {cached_features_file}""" )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F"""Creating features from dataset file at {data_dir}""" )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info("Training examples: %s" , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info("Saving features into cached file %s" , cached_features_file )
                    torch.save(self.features , cached_features_file )
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels( self ):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
        def __init__( self , data_dir , tokenizer : PreTrainedTokenizer , task , max_seq_length = 128 , overwrite_cache=False , evaluate : bool = False , ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples )) )
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ) , (
                    {
                        "example_id": tf.TensorShape([] ),
                        "input_ids": tf.TensorShape([None, None] ),
                        "attention_mask": tf.TensorShape([None, None] ),
                        "token_type_ids": tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )
        def get_dataset( self ):
            return self.dataset
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels( self ):
            return self.label_list
class HansProcessor(DataProcessor ):
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F"""guid: {example.guid}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
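# --- Illustrative sketch (assumes network access to download the tokenizer) ---
# A minimal sketch of the example -> features conversion the HANS utilities
# above perform: a premise/hypothesis pair is tokenized to a fixed length and
# the string label is mapped to an integer. The sample sentences are invented.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
label_list = ["contradiction", "entailment", "neutral"]
label_map = {label: i for i, label in enumerate(label_list)}

encoding = tokenizer(
    "The doctors visited the lawyer.",   # text_a (premise)
    "The lawyer visited the doctors.",   # text_b (hypothesis)
    max_length=128,
    padding="max_length",
    truncation=True,
)
label = label_map["entailment"]
print(len(encoding["input_ids"]), label)  # 128 1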
"""simple docstring"""
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = ["""pixel_values"""]
def __init__( self : str , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_55 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
A: int = size if size is not None else {"height": 2_56, "width": 2_56}
A: Union[str, Any] = get_size_dict(_lowerCAmelCase )
A: Tuple = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
A: List[Any] = get_size_dict(_lowerCAmelCase , param_name='''crop_size''' )
A: Dict = do_resize
A: int = size
A: Any = resample
A: Optional[Any] = do_center_crop
A: Any = crop_size
A: List[Any] = do_rescale
A: int = rescale_factor
A: List[Any] = do_normalize
A: Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A: Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Dict:
'''simple docstring'''
A: Any = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
_lowerCAmelCase , size=(size['''height'''], size['''width''']) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[Any]:
'''simple docstring'''
A: Dict = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Dict:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> List[Any]:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> str:
'''simple docstring'''
A: List[str] = do_resize if do_resize is not None else self.do_resize
A: int = resample if resample is not None else self.resample
A: Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
A: Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
A: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
A: List[str] = do_normalize if do_normalize is not None else self.do_normalize
A: Any = image_mean if image_mean is not None else self.image_mean
A: Any = image_std if image_std is not None else self.image_std
A: List[str] = size if size is not None else self.size
A: str = get_size_dict(_lowerCAmelCase )
A: int = crop_size if crop_size is not None else self.crop_size
A: int = get_size_dict(_lowerCAmelCase , param_name='''crop_size''' )
A: Optional[Any] = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A: Tuple = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
A: Optional[Any] = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
A: Optional[int] = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
A: Optional[Any] = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
A: Tuple = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
A: int = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
A: List[str] = {"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
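# --- Illustrative sketch (numpy + PIL only, synthetic input image) ---
# The resize -> center-crop -> rescale -> normalize chain applied by the image
# processor above, written out by hand. The 256/224 sizes and the 0.5 mean/std
# mirror the defaults above (IMAGENET_STANDARD_MEAN/STD are [0.5, 0.5, 0.5]).
import numpy as np
from PIL import Image

image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))

resized = image.resize((256, 256), resample=Image.BICUBIC)   # do_resize
array = np.asarray(resized).astype(np.float32)

top = (array.shape[0] - 224) // 2                            # do_center_crop
left = (array.shape[1] - 224) // 2
cropped = array[top : top + 224, left : left + 224]

scaled = cropped * (1 / 255)                                 # do_rescale
mean = np.array([0.5, 0.5, 0.5])
std = np.array([0.5, 0.5, 0.5])
normalized = (scaled - mean) / std                           # do_normalize

pixel_values = normalized.transpose(2, 0, 1)                 # channels-first
print(pixel_values.shape)  # (3, 224, 224)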
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
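# --- Illustrative sketch of the lazy-import pattern ---
# What _LazyModule above achieves, rewritten with plain PEP 562 module-level
# __getattr__ as it could appear inside the package's __init__.py. This is a
# sketch of the pattern, not the actual _LazyModule implementation.
import importlib

_attr_to_module = {
    attr: module
    for module, attrs in {"tokenization_tapex": ["TapexTokenizer"]}.items()
    for attr in attrs
}

def __getattr__(name):
    # The submodule is only imported on first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")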
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
__a :Tuple = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch( roberta_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool ):
    """simple docstring"""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" ,config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
A_ = roberta_sent_encoder.embed_tokens.weight
A_ = roberta_sent_encoder.embed_positions.weight
A_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
A_ = roberta_sent_encoder.layer_norm.weight
A_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A_ = model.roberta.encoder.layer[i]
A_ = roberta_sent_encoder.layers[i]
A_ = layer.attention
A_ = roberta_layer.self_attn_layer_norm.weight
A_ = roberta_layer.self_attn_layer_norm.bias
# self attention
A_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
A_ = roberta_layer.self_attn.q_proj.weight
A_ = roberta_layer.self_attn.q_proj.bias
A_ = roberta_layer.self_attn.k_proj.weight
A_ = roberta_layer.self_attn.k_proj.bias
A_ = roberta_layer.self_attn.v_proj.weight
A_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
A_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
A_ = roberta_layer.self_attn.out_proj.weight
A_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
A_ = roberta_layer.final_layer_norm.weight
A_ = roberta_layer.final_layer_norm.bias
        # intermediate
        A_ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        A_ = roberta_layer.fc1.weight
        A_ = roberta_layer.fc1.bias
        # output
        A_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        A_ = roberta_layer.fc2.weight
        A_ = roberta_layer.fc2.bias
# end of layer
if classification_head:
A_ = roberta.model.classification_heads["mnli"].dense.weight
A_ = roberta.model.classification_heads["mnli"].dense.bias
A_ = roberta.model.classification_heads["mnli"].out_proj.weight
A_ = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A_ = roberta.model.encoder.lm_head.dense.weight
A_ = roberta.model.encoder.lm_head.dense.bias
A_ = roberta.model.encoder.lm_head.layer_norm.weight
A_ = roberta.model.encoder.lm_head.layer_norm.bias
A_ = roberta.model.encoder.lm_head.weight
A_ = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape ,their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output ,their_output ,atol=1E-3 )
    print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True ,exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
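# --- Illustrative sketch of the sanity check used above ---
# The conversion script's final verification pattern in miniature: copy weights
# from a source module into a target module, then confirm both produce the same
# output within tolerance. The layer sizes are arbitrary illustration values.
import torch
import torch.nn as nn

src = nn.Linear(8, 8)
dst = nn.Linear(8, 8)
dst.weight.data = src.weight.data.clone()  # per-layer weight copy, as above
dst.bias.data = src.bias.data.clone()

x = torch.randn(1, 8)
max_absolute_diff = torch.max(torch.abs(src(x) - dst(x))).item()
print(f"max_absolute_diff = {max_absolute_diff}")  # expected ~0 here
assert torch.allclose(src(x), dst(x), atol=1e-3), "Something went wRoNg"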
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
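# --- Illustrative sketch of the two methods above ---
# For a sentence pair, build_inputs_with_special_tokens produces
# [CLS] A [SEP] B [SEP], and create_token_type_ids_from_sequences marks the
# first segment (including [CLS] and its [SEP]) with 0 and the second with 1.
# All token ids below are made-up placeholders; only the layout matters.
cls_token_id, sep_token_id = 101, 102     # hypothetical special-token ids
token_ids_0 = [7592, 2088]                # hypothetical first sentence
token_ids_1 = [2129, 2024, 2017]          # hypothetical second sentence

input_ids = [cls_token_id] + token_ids_0 + [sep_token_id] + token_ids_1 + [sep_token_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
assert len(input_ids) == len(token_type_ids)
print(input_ids)
print(token_type_ids)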
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    param_name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , param_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['''special_dtypes'''] = special_dtypes
        kwargs['''no_split_module_classes'''] = no_split_module_classes
        kwargs['''dtype'''] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
        kwargs['''max_memory'''] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ):
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
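# --- Illustrative sketch of the recursive replacement above ---
# A stripped-down version of what _replace_with_bnb_layers does: walk
# named_children, swap nn.Linear modules for a stand-in class (a plain Linear
# subclass here, instead of bnb.nn.Linear8bitLt / Linear4bit), copy the weights
# across, and recurse into containers. Purely illustrative, no bitsandbytes.
import torch.nn as nn

class StubQuantLinear(nn.Linear):
    """Stand-in for a quantized linear layer in this sketch."""

def replace_linears(model, skip=()):
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            new = StubQuantLinear(
                module.in_features, module.out_features, bias=module.bias is not None
            )
            new.weight.data = module.weight.data
            if module.bias is not None:
                new.bias.data = module.bias.data
            setattr(model, name, new)  # swap the child in place
        elif len(list(module.children())) > 0:
            replace_linears(module, skip)  # recurse into containers
    return model

print(replace_linears(nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))))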
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
    def _download_and_prepare( self ,dl_manager ):
        """simple docstring"""
        import nltk
        nltk.download('''wordnet''' )
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            nltk.download('''punkt''' )
        if NLTK_VERSION >= version.Version('''3.6.6''' ):
            nltk.download('''omw-1.4''' )
    def _compute( self ,predictions ,references ,alpha=0.9 ,beta=3 ,gamma=0.5 ):
        """simple docstring"""
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) ,word_tokenize(pred ) ,alpha=alpha ,beta=beta ,gamma=gamma )
                for ref, pred in zip(references ,predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref ,pred ,alpha=alpha ,beta=beta ,gamma=gamma )
                for ref, pred in zip(references ,predictions )
            ]
        return {"meteor": np.mean(scores )}
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def create_and_check_for_question_answering(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self ):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=3_7 )
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head(self ):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 1_9_9, 7_6_8) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
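# Why the expected shape above is (1, 199, 768): the 224x224 input with 16x16
# patches yields (224 // 16) ** 2 + 1 = 197 visual tokens (196 patches + CLS),
# and the two text tokens in `input_ids` bring the total to 199; 768 is the
# hidden size of layoutlmv3-base. (Illustrative note, not part of the test.)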
"""simple docstring"""
_snake_case : Tuple = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_snake_case : str = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = True
A = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
order.append(UpperCamelCase )
return order
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = True
A = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return component
def A__ ( UpperCamelCase ):
A = len(UpperCamelCase ) * [False]
A = {vert: [] for vert in range(len(UpperCamelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(UpperCamelCase )
A = []
for i, was_visited in enumerate(UpperCamelCase ):
if not was_visited:
order += topology_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A = []
A = len(UpperCamelCase ) * [False]
for i in range(len(UpperCamelCase ) ):
A = order[len(UpperCamelCase ) - i - 1]
if not visited[vert]:
A = find_components(UpperCamelCase , UpperCamelCase , UpperCamelCase )
components_list.append(UpperCamelCase )
return components_list
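# Illustrative usage on the sample graphs above (component ordering may vary):
# strongly_connected_components(test_graph_1) -> [[0, 1, 2], [3], [4]]
# strongly_connected_components(test_graph_2) -> [[0, 1, 2], [3, 4, 5]]
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1 ) )
    print(strongly_connected_components(test_graph_2 ) )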
"""simple docstring"""
_snake_case : Optional[int] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
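# Minimal usage sketch (assumed API surface, for illustration only):
# from datasets.download import DownloadConfig, DownloadManager
# dl_manager = DownloadManager(download_config=DownloadConfig())
# local_path = dl_manager.download("https://example.com/data.csv")  # cached local path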
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
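    # Sanity check for the arithmetic above (illustrative): with the default
    # image_size=30 and patch_size=2, (30 // 2) ** 2 = 225 patches, plus the
    # [CLS] token -> seq_length == 226.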
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def create_and_check_model(self , config , pixel_values , labels ):
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling(self , config , pixel_values , labels ):
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self ):
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass

    def test_model_common_attributes(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_image_modeling(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self ):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_inference_interpolate_pos_encoding(self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained('facebook/dino-vits8' ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
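    # Note on the expected shape (1, 3601, 384): resizing to 480x480 with
    # ViT-S/8's 8x8 patches gives (480 // 8) ** 2 = 3600 patches, plus the
    # [CLS] token -> 3601 positions; 384 is the ViT-S hidden size.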
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self ):
        r"""A small test to make sure that inference works in half precision without any problem."""
        model = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values )
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester( unittest.TestCase ):
    def test_get_aligned_output_features_output_indices(self ):
        stage_names = ['a', 'b', 'c']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ['c'] )
        self.assertEqual(out_indices , [2] )

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['a', 'c'] , None , stage_names )
        self.assertEqual(out_features , ['a', 'c'] )
        self.assertEqual(out_indices , [0, 2] )

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ['a', 'c'] )
        self.assertEqual(out_indices , [0, 2] )

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ['a', 'c'] )
        self.assertEqual(out_indices , [-3, -1] )

    def test_verify_out_features_out_indices(self ):
        # Stage names must be set
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ['a', 'b'] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ['a'] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )

    def test_backbone_mixin(self ):
        backbone = BackboneMixin()
        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['a', 'c'] )
        self.assertEqual(backbone.out_indices , [0, 2] )

        # Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features , ['a', 'b'] )
        self.assertEqual(backbone.out_indices , [0, 1] )

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ['a', 'c'] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__(
        self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type='bottleneck' , hidden_act='relu' , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1E-3
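# Illustrative usage (assumed, mirroring the alignment logic above): with the
# default depths, stage_names is ['stem', 'stage1', 'stage2', 'stage3', 'stage4'],
# so ResNetConfig(out_features=['stem', 'stage2']) would have its out_indices
# aligned to [0, 2] by get_aligned_output_features_output_indices.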
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()

def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Create a random float32 tensor as a nested list of python floats."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
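# Example (illustrative): floats_list((2, 3)) -> a 2x3 nested list of uniform
# floats in [0, scale); pass rng=random.Random(0) for reproducible values.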
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester( unittest.TestCase ):
    def __init__(
        self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , feature_size=2_4 , num_mel_bins=2_4 , padding_value=0.0 , sampling_rate=1_6_0_0_0 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self ):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self ):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance(self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
    def test_call(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding=True , return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_cepstral_mean_and_variance_normalization(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]

        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1_6, None]
        for max_length, padding in zip(max_lengths , paddings ):
            inputs = feature_extractor(
                speech_inputs , padding=padding , max_length=max_length , return_attention_mask=True )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x ) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_np(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]

        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1_6, None]
        for max_length, padding in zip(max_lengths , paddings ):
            inputs = feature_extractor(
                speech_inputs , max_length=max_length , padding=padding , return_tensors='np' , return_attention_mask=True )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x ) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        inputs = feature_extractor(
            speech_inputs , padding='max_length' , max_length=4 , truncation=True , return_tensors='np' , return_attention_mask=True , )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1 , axis=1 )

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1] )
        self._check_zero_mean_unit_variance(input_features[2] )
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        inputs = feature_extractor(
            speech_inputs , padding='longest' , max_length=4 , truncation=True , return_tensors='np' , return_attention_mask=True , )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1 , axis=1 )

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape , (3, 4, 2_4) )

        speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        inputs = feature_extractor(
            speech_inputs , padding='longest' , max_length=1_6 , truncation=True , return_tensors='np' , return_attention_mask=True , )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1 , axis=1 )

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape , (3, 6, 2_4) )
    def test_double_precision_pad(self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 , 3_2 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples(self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x['array'] for x in speech_samples]
    def test_integration(self ):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 5_8_4, 2_4) )
        self.assertTrue(np.allclose(input_features[0, 0, :3_0] , EXPECTED_INPUT_FEATURES , atol=1E-4 ) )
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k , patterns ):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
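# Example (illustrative): with DECODER_PATTERNS, the tf key
# "pegasus/decoder/layer_0/attention/self/query/kernel" is rewritten step by
# step to "model.decoder.layers.0.self_attn.q_proj.weight".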
def convert_bigbird_pegasus(tf_weights: dict , config_update: dict ) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}

    for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight" )
    missing , extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], F'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path ) -> dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars , desc="converting tf checkpoint to dict" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str , config_update: dict ):
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
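# Example invocation (illustrative paths; the script name is an assumption):
# python convert_bigbird_pegasus_tf_to_pytorch.py \
#     --tf_ckpt_path /path/to/tf_checkpoint --save_dir ./bigbird-pegasus-converted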
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
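# The _LazyModule pattern above defers importing torch-heavy submodules until an
# attribute is first accessed; e.g. (illustrative)
# `from transformers.models.mra import MraModel` only then triggers the import
# of modeling_mra.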
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ):
    """Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]

    print('Our RoBERTa config:' , config )

    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1

    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )

    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase__ :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
lowerCAmelCase__ :Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
import math

BALLS_PER_COLOUR = 1_0
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS

def solution(num_picks: int = 2_0 ) -> str:
    """Project Euler 493: expected number of distinct colours when drawing
    `num_picks` balls from an urn of 7 colours x 10 balls. By linearity of
    expectation, E = NUM_COLOURS * (1 - C(60, n) / C(70, n))."""
    total = math.comb(NUM_BALLS , num_picks )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picks )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F'{result:.9f}'

if __name__ == "__main__":
    print(solution(2_0 ) )
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2_0_4_8, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer( nn.Module ):
    def __init__(self , in_channels: int , out_channels: int , kernel_size: int = 3 , stride: int = 1 , activation: str = "relu" ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self , input: Tensor ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetEmbeddings( nn.Module ):
    """ResNet embeddings (stem) composed of a single aggressive convolution."""

    def __init__(self , config: ResNetConfig ):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels

    def forward(self , pixel_values: Tensor ) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut( nn.Module ):
    """ResNet shortcut, used to project the residual features to the correct size and, when needed, to downsample the input with `stride=2`."""

    def __init__(self , in_channels: int , out_channels: int , stride: int = 2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward(self , input: Tensor ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer( nn.Module ):
    """A classic ResNet residual layer composed of two `3x3` convolutions."""

    def __init__(self , in_channels: int , out_channels: int , stride: int = 1 , activation: str = "relu" ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward(self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetBottleNeckLayer( nn.Module ):
    """A classic ResNet bottleneck layer composed of three convolutions."""

    def __init__(self , in_channels: int , out_channels: int , stride: int = 1 , activation: str = "relu" , reduction: int = 4 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward(self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetStage( nn.Module ):
    """simple docstring"""
    def __init__( self , config : ResNetConfig , in_channels : int , out_channels : int , stride : int = 2 , depth : int = 2 ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
    def forward( self , hidden_state : Tensor ):
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder( nn.Module ):
    """simple docstring"""
    def __init__( self , config : ResNetConfig ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )
    def forward( self , hidden_state : Tensor , output_hidden_states : bool = False , return_dict : bool = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )
class ResNetPreTrainedModel( PreTrainedModel ):
    """simple docstring"""
    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
_SCREAMING_SNAKE_CASE : int = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_SCREAMING_SNAKE_CASE : Optional[int] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.', RESNET_START_DOCSTRING, )
class ResNetModel( ResNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config : ResNetConfig ):
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', RESNET_START_DOCSTRING, )
class ResNetForImageClassification( ResNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values : Optional[torch.FloatTensor] = None , labels : Optional[torch.LongTensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ', RESNET_START_DOCSTRING, )
class ResNetBackbone( ResNetPreTrainedModel, BackboneMixin ):
    """simple docstring"""
    def __init__( self , config ):
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
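# Added usage sketch (not part of the original module): a minimal forward pass,
# assuming the public `transformers` classes of the same names. With the default
# ResNet-50-style config, a 224x224 input yields a 7x7x2048 feature map.
#
#   import torch
#   from transformers import ResNetConfig, ResNetModel
#
#   config = ResNetConfig()
#   model = ResNetModel(config)
#   pixel_values = torch.randn(1, config.num_channels, 224, 224)
#   with torch.no_grad():
#       outputs = model(pixel_values)
#   print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])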
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir : str , split : str="eval" ):
    '''simple docstring'''
    path = os.path.join(output_dir , F'''{split}_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(F'''can\'t find {path}''' )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests( TestCasePlus ):
"""simple docstring"""
    def test_run_glue( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
    def test_run_clm( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
    def test_run_summarization( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
    def test_run_mlm( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
    def test_run_t5_mlm( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
    def test_run_ner( self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
    def test_run_qa( self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
        with patch.object(sys , '''argv''' , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
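# Added note: all of the tests above except the first are decorated with @slow and are
# skipped by default; in the transformers test setup they are enabled with the RUN_SLOW
# environment variable (the file path below is an assumption about where this suite lives):
#
#   RUN_SLOW=1 python -m pytest -sv examples/flax/test_flax_examples.py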
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer( BaseTransformer ):
    """simple docstring"""
    mode = "token-classification"
    def __init__( self , hparams ):
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("tasks" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        """simple docstring"""
        return self.model(**inputs )
    def training_step( self , batch , batch_num ):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ):
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("Saving features into cached file %s" , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode , batch_size , shuffle = False ):
        """simple docstring"""
        cached_features_file = self._feature_file(mode )
        logger.info("Loading features from cached file %s" , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self , batch , batch_nb ):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        """simple docstring"""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs] ).mean()
        preds = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x["target"] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        """simple docstring"""
        # when stable
        ret, preds, targets = self._eval_end(outputs )
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        """simple docstring"""
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ):
        """simple docstring"""
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            "--task_type" , default="NER" , type=str , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
        parser.add_argument(
            "--max_seq_length" , default=128 , type=int , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--labels" , default="" , type=str , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
        parser.add_argument(
            "--gpus" , default=0 , type=int , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
a_ : Optional[int] = NERTransformer.add_model_specific_args(parser, os.getcwd())
a_ : Any = parser.parse_args()
a_ : int = NERTransformer(args)
a_ : Union[str, Any] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
a_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
a_ : Optional[int] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer( self ):
        """simple docstring"""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_special_tokens_small_tok( self ):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text] , padding=False , truncation=True )["input_ids"]
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self ):
        """simple docstring"""
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text )["input_ids"]
        encoded_dot = tok(src_text_dot )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
class Node:
    """A binary search tree node used by tree_sort."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)
        else:
            # equal keys overwrite the stored value, so duplicates are dropped
            self.val = val


def inorder(root, res):
    """Append the values of the subtree rooted at `root` to `res` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort `arr` by inserting every element into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
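# Added usage notes (hypothetical inputs): equal keys overwrite the stored node value,
# so duplicates are dropped, and already-sorted input degenerates the BST into a linked
# list, giving O(n^2) worst-case behavior:
#
#   print(tree_sort([5, 5, 1]))         # [1, 5] -- the duplicate 5 is collapsed
#   print(tree_sort(list(range(100))))  # works, but every insert walks the whole right spine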
import math


def perfect_square(num: int) -> bool:
    """Check via floating-point sqrt; fast but can misclassify very large integers.

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Exact integer check using binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
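# Added note: the float-based `perfect_square` can misclassify very large integers,
# since math.sqrt rounds to 53 bits of precision, while the binary-search variant is
# exact for arbitrarily large n:
#
#   n = (10**20 + 1) ** 2
#   perfect_square(n)                # False -- float rounding loses the +1
#   perfect_square_binary_search(n)  # True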
"""simple docstring"""
import math
import unittest
def is_prime( number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test( unittest.TestCase ):
    '''simple docstring'''
    def test_primes( self ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes( self ):
        with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Convert between energy units.

    >>> energy_conversion("joule", "kilojoule", 1000)
    1.0
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
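# Added usage examples:
#
#   >>> energy_conversion("kilowatthour", "joule", 1)
#   3600000.0
#   >>> energy_conversion("joule", "calorie_nutr", 4186.8)
#   1.0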
'''simple docstring'''
class PrefixSum:
    def __init__( self , array )-> None:
        '''Precompute prefix sums of `array` so range-sum queries run in O(1).'''
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start , end )-> int:
        '''Sum of array[start..end], both ends inclusive.'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum )-> bool:
        '''Return True if some contiguous subarray sums to `target_sum`.'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
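# Added usage example:
#
#   >>> ps = PrefixSum([1, 2, 3, 4])
#   >>> ps.get_sum(1, 3)  # 2 + 3 + 4
#   9
#   >>> ps.contains_sum(6)  # the contiguous subarray [1, 2, 3] sums to 6
#   True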
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model , config , tf_weights=None):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model , config , tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.')
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''')
        array = tf.train.load_variable(tf_checkpoint_path , name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''')
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''')
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            array = np.transpose(array , (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')
        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''')
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name , None)
        tf_weights.pop(name + '/RMSProp' , None)
        tf_weights.pop(name + '/RMSProp_1' , None)
        tf_weights.pop(name + '/ExponentialMovingAverage' , None)
    logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}''')
    return model
def apply_tf_padding(features: torch.Tensor , conv_layer: nn.Conv2d) -> torch.Tensor:
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0)
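# Added worked example for apply_tf_padding: with a 224x224 input, stride 2 and
# kernel 3 (the MobileNetV1 stem), 224 % 2 == 0, so pad_along = max(3 - 2, 0) = 1 on
# each axis, split as (left, right, top, bottom) = (0, 1, 0, 1). TensorFlow "SAME"
# padding pads asymmetrically on the bottom/right, which a symmetric nn.Conv2d
# `padding=` argument cannot reproduce -- hence this explicit functional pad.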
class MobileNetVaConvLayer(nn.Module):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , )-> None:
        '''simple docstring'''
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='zeros' , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features )-> torch.Tensor:
        '''simple docstring'''
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = """mobilenet_v1"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False
    def _init_weights( self , module )-> None:
        '''simple docstring'''
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__( self , config , add_pooling_layer = True )-> None:
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune )-> None:
        '''simple docstring'''
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , )-> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__( self , config )-> None:
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
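# Added usage sketch (not part of the original module), using the public transformers
# API for the checkpoint named in the docstrings above:
#
#   import torch
#   import requests
#   from PIL import Image
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   with torch.no_grad():
#       logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])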
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location=None , device_map=None , no_split_module_classes=None , max_memory=None , offload_folder=None , offload_state_dict=False , ) -> Optional[Any]:
    '''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """make sure you have the latest version of `bitsandbytes` installed.""" )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            """It is not recommended to quantize a loaded model. """
            """The model should be instantiated under the `init_empty_weights` context manager.""" )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    module_name = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                    param = getattr(model , module_name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            """We move the model to cuda.""" )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Dict:
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized.""" """Setting device_map to `{\'\':torch.cuda.current_device()}`.""" )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                """If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or """
                """\'sequential\'.""" )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == """balanced_low_0""") , max_memory=max_memory , **kwargs , )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """\n                    Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n                    the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n                    these modules in `torch_dtype`, you need to pass a custom `device_map` to\n                    `load_and_quantize_model`. Check\n                    https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n                    for more details.\n                    """ )
                else:
                    logger.info(
                        """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Dict:
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Any:
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can\'t be both False""" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ) -> Union[str, Any]:
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , """base_model_prefix""" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers(model):
    '''simple docstring'''
    # check whether any module of the model is a bitsandbytes 4-bit linear layer
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    '''simple docstring'''
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
    tensor_name = param_name
    module = model
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(F"""{module} has no attribute {split}.""")
            module = new_module
        tensor_name = splits[-1]
    # offload weights: freeze the tensor before serializing it
    module._parameters[tensor_name].requires_grad = False
    offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
    if hasattr(module._parameters[tensor_name], "SCB"):
        offload_weight(
            module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 296
|
'''simple docstring'''
def add(first: int, second: int) -> int:
    # iterative bitwise addition: XOR adds without the carry,
    # AND followed by a left shift produces the carry
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(F"{add(first, second) = }")
| 297
| 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 353
|
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        F'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
    )
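# Worked example: 145 = 1! + 4! + 5! = 1 + 24 + 120, so krishnamurthy(145) is True,
# while 144 gives 1! + 4! + 4! = 49 != 144, so krishnamurthy(144) is False.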
| 180
| 0
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
BLIP_TEST_FILE = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 157
|
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
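# Usage sketch: join("-", ["a", "b", "c"]) builds "a-b-c-" in the loop and the final
# strip(separator) removes the trailing separator, returning "a-b-c".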
if __name__ == "__main__":
from doctest import testmod
testmod()
| 157
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='''trunc'''),
            ], axis=1, )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ], dim=2, )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        # `shape` is forwarded as well: the dataclass field has no default value
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, shape=self.shape, )
def create_pan_cameras(size):
    """simple docstring"""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(), x=torch.from_numpy(np.stack(xs, axis=0)).float(), y=torch.from_numpy(np.stack(ys, axis=0)).float(), z=torch.from_numpy(np.stack(zs, axis=0)).float(), width=size, height=size, x_fov=0.7, y_fov=0.7, shape=(1, len(xs)), )
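# Usage sketch (assuming the reconstruction above is faithful): create_pan_cameras(size)
# builds a batch of 20 cameras panning around the origin, and its `camera_rays` property
# has shape (1, 20 * size * size, 2, 3): an (origin, unit direction) pair per pixel per view.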
| 371
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    """simple docstring"""
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """simple docstring"""
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """simple docstring"""
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
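# Usage sketch for the structures above:
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 3)
#   mst = g.kruskal()  # keeps the weight-1 and weight-2 edges, drops the weight-3 edge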
| 215
| 0
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    # Γ(num) = ∫_0^∞ x^(num - 1) e^(-x) dx, evaluated numerically with scipy's quad
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]
def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
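# Sanity check: gamma(n) equals (n - 1)! for positive integers, so gamma(5)
# should evaluate to approximately 24.0.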
if __name__ == "__main__":
from doctest import testmod
testmod()
| 59
|
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
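# Illustrative checks: perfect_square_binary_search(49) -> True (7 * 7) and
# perfect_square_binary_search(50) -> False. The float-based variant above can
# misbehave for very large inputs because of floating-point rounding.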
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22
| 0
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
def __UpperCamelCase ( ):
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
benchmark()
| 167
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def test_config(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''embed_dim'''))
        self.parent.assertTrue(hasattr(config, '''num_heads'''))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """simple docstring"""
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason='''Cvt does not output attentions''')
    def test_attention_outputs(self):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''')
    def test_model_common_attributes(self):
        """simple docstring"""
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0, reason='''TF does not support backprop for grouped convolutions on CPU.''', )
    def test_dataset_conversion(self):
        """simple docstring"""
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0, reason='''TF does not support backprop for grouped convolutions on CPU.''', )
    @slow
    def test_keras_fit(self):
        """simple docstring"""
        super().test_keras_fit()

    @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''')
    def test_keras_fit_mixed_precision(self):
        """simple docstring"""
        policy = tf.keras.mixed_precision.Policy('''mixed_float16''')
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('''float32''')

    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 167
| 1
|
'''simple docstring'''
def apply_table(inp, table):
    """simple docstring"""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    """simple docstring"""
    return data[1:] + data[0]
def xor(a, b):
    """simple docstring"""
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    """simple docstring"""
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    """simple docstring"""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
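# Mini examples for the helpers above: apply_table("0111", [2, 4, 3, 1]) reads
# positions 2, 4, 3, 1 and returns "1110"; xor("1010", "0110") returns "1100".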
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 359
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227
| 0
|
from __future__ import annotations
def min_path_sum(matrix: list) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
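# Worked example: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest top-left to
# bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7.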
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
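# Worked example: 13 = 4 + 9 needs two perfect squares, so
# minimum_squares_to_represent_a_number(13) returns 2; perfect squares such as 16 return 1.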
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180
| 0
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = RobertaTokenizer
lowercase_ = RobertaTokenizerFast
lowercase_ = True
lowercase_ = {'cls_token': '<s>'}
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , **lowerCAmelCase_ : int ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[str] ) -> Dict:
"""simple docstring"""
_a = '''lower newer'''
_a = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''lower newer'''
_a = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_a = tokenizer.tokenize(lowerCAmelCase_ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowerCAmelCase_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowerCAmelCase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a = self.tokenizer_class.from_pretrained('''roberta-base''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase_ )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase_ )
_a = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
_a = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = self.get_tokenizer()
_a = '''Encode this sequence.'''
_a = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ )
_a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_a = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Testing spaces after special tokens
_a = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ )} ) # mask token has a left space
_a = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
_a = '''Encode <mask> sequence'''
_a = '''Encode <mask>sequence'''
_a = tokenizer.encode(lowerCAmelCase_ )
_a = encoded.index(lowerCAmelCase_ )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_a = tokenizer.encode(lowerCAmelCase_ )
_a = encoded.index(lowerCAmelCase_ )
_a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowerCAmelCase_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowerCAmelCase_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_a = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_a = F'{text_of_1_token} {text_of_1_token}'
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
_a = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ) + 1, 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ), 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
_a = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ )
_a = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ), 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
| 179
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        """simple docstring"""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        """simple docstring"""
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        """simple docstring"""
        pass

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        """simple docstring"""
        return

    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        """simple docstring"""
        pass
| 179
| 1
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # only the local main process should render the progress bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
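# Usage sketch (assuming the wrapper is exposed via `accelerate.utils`):
#   from accelerate.utils import tqdm
#   for _ in tqdm(True, range(100)):  # progress bar renders only on the local main process
#       ...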
| 283
|
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("""graph is not Eulerian""")
        print("""no path""")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("""graph has a Euler path""")
    if check == 1:
        print("""graph has a Euler cycle""")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 215
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
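
# Minimal usage sketch (illustrative; the override and output path below are
# arbitrary examples, not values required by the library):
#
#   config = MgpstrConfig(max_token_length=27)
#   config.save_pretrained("./mgp-str-base")  # writes config.json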
| 328
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 328
| 1
|
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-audio-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
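
# Worked example for the property above (a sketch, not part of the module):
# with the default strides (5, 2, 2, 2, 2, 2, 2) the feature encoder
# downsamples raw audio by 5 * 2**6 == 320 input samples per output frame, so
#
#   Data2VecAudioConfig().inputs_to_logits_ratio == 320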
| 167
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 167
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    # It's very difficult to mix/test pretokenization with byte-level encodings,
    # so this common test is skipped here.
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context for this test can be found at https://github.com/huggingface/transformers/issues/17809
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 175
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]

    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 1
|
"""Map activation-function names from model configs to `torch.nn` modules."""
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return the activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
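
# Minimal usage sketch (illustrative):
#
#   act = get_activation("silu")   # -> nn.SiLU()
#   act = get_activation("gelu")   # -> nn.GELU()
#
# Any other name raises ValueError, so a typo in a config fails loudly.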
| 70
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `a` is the expected encoding dict defined above
        self.tokenizer_integration_test_util(
            expected_encoding=a,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 227
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 127
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original EfficientNet weights into our EfficientNet structure."""
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
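# Layout sanity sketch for the conversion above (an illustrative addition, not part of the
# original script): TF stores conv kernels as (H, W, C_in, C_out), PyTorch as (C_out, C_in, H, W).
def _check_kernel_permutation():
    tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)
    pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
    assert tuple(pt_kernel.shape) == (32, 16, 3, 3)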
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
"""simple docstring"""
def __lowercase(txt: str) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(snake_case_ ) )
if txt[a].isalpha()
]
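# Example (illustrative, not in the original file): for "ab1c" this yields
# ["Ab1c", "aB1c", "ab1C"] -- the digit position is skipped because isalpha() is False.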
if __name__ == "__main__":
__import__("""doctest""").testmod()
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
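# e.g. split_text("w1 w2 w3 w4", n=2) -> ["w1 w2", "w3 w4"]  (illustrative example, not in the original)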
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowercase ( snake_case_ : "RagExampleArguments" ,snake_case_ : "ProcessingArguments" ,snake_case_ : "IndexHnswArguments" ,) ->Tuple:
'''simple docstring'''
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__A : Optional[Any] = load_dataset(
'''csv''' ,data_files=[rag_example_args.csv_path] ,split='''train''' ,delimiter='''\t''' ,column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__A : str = dataset.map(snake_case_ ,batched=snake_case_ ,num_proc=processing_args.num_proc )
# And compute the embeddings
__A : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case_ )
__A : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__A : Optional[int] = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
__A : List[Any] = dataset.map(
partial(snake_case_ ,ctx_encoder=snake_case_ ,ctx_tokenizer=snake_case_ ) ,batched=snake_case_ ,batch_size=processing_args.batch_size ,features=snake_case_ ,)
# And finally save your dataset
__A : int = os.path.join(rag_example_args.output_dir ,'''my_knowledge_dataset''' )
dataset.save_to_disk(snake_case_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__A : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d ,index_hnsw_args.m ,faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''' ,custom_index=snake_case_ )
# And save the index
__A : List[str] = os.path.join(rag_example_args.output_dir ,'''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(snake_case_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
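    # Step 3 sketch (an illustrative addition, not part of the original file): the saved
    # passages and index can be plugged into a RAG retriever via the "custom" index:
    #
    #   from transformers import RagRetriever
    #   retriever = RagRetriever.from_pretrained(
    #       rag_example_args.rag_model_name,
    #       index_name="custom",
    #       passages_path=passages_path,
    #       index_path=index_path,
    #   )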
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer, based on SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id_): id_ for id_ in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with SentencePiece, then re-split around CJK characters, punctuation and digits."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
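    # Illustrative example (not in the original): for token_ids_0=[5, 6] and token_ids_1=[7],
    # build_inputs_with_special_tokens returns [CLS] 5 6 [SEP] [SEP] 7 [SEP].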
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Solve the maximum subarray problem with divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Find the best subarray that crosses the midpoint."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
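    # Worked example (an illustrative addition, not part of the original file):
    # the best contiguous slice of [-2, 1, -3, 4, -1, 2, 1, -5, 4] is arr[3:7] = [4, -1, 2, 1], sum 6.
    example = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(max_subarray(example, 0, len(example) - 1))  # expected: (3, 6, 6)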
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : int = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
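# Usage sketch (illustrative, not part of the original file):
#   config = MgpstrConfig()          # defaults mirror the alibaba-damo/mgp-str-base checkpoint
#   config.model_type                # -> "mgp-str"
#   config.output_a3_attentions      # -> False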
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
UpperCamelCase_ =get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
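# Usage sketch (a hedged, illustrative addition -- the module and attribute names below are
# made up for the demo and are not part of the original file):
#
#   import os, types
#   demo = types.ModuleType("demo"); demo.os = os
#   with patch_submodule(demo, "os.path.join", lambda *a: "/patched"):
#       demo.os.path.join("a", "b")   # -> "/patched", only through demo's view of os
#   demo.os.path.join("a", "b")       # restored to the real os.path.join on exit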
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
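# Usage sketch (illustrative; requires the `invisible-watermark` package that provides
# WatermarkEncoder, and is not part of the original file):
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)
#   watermarked = watermarker.apply_watermark(images)  # same shape, values kept in [-1, 1]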
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
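# First terms for reference (an illustrative addition, not in the original file):
# sylvester(1) == 2, sylvester(2) == 3, sylvester(3) == 7, sylvester(4) == 43,
# since each term equals the product of the two recurrence factors plus one.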
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
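# Example invocation (illustrative; the script filename is an assumption, not from the original):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./resnetv2_50x1_bitm --push_to_hub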
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground() -> list[TPos]:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
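
# A minimal, self-contained sketch of the relative-tolerance comparison used above, pulled
# out into a reusable helper. The helper name and sample values are hypothetical, not part
# of the test suite; note that a ratio-based check assumes the reference values are nonzero.
def assert_relative_close(expected, actual, tolerance=1e-3):
    ratio = expected / actual
    assert torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance)


if __name__ == "__main__":
    ref = torch.tensor([1.0e8, -2.5, 3.0])
    assert_relative_close(ref, ref * 1.0000005)  # passes: within 0.00005% relative error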
def counting_sort(collection):
    """Pure implementation of counting sort algorithm in Python."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
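
# A quick, hedged demonstration of two properties the implementation above relies on: the
# offset by coll_min makes negative inputs work, and walking the input from the end keeps
# equal elements in their original relative order (stability). The sample values are
# illustrative only.
if __name__ == "__main__":
    assert counting_sort([0, 5, 3, -2, 2, 2]) == [-2, 0, 2, 2, 3, 5]  # handles negatives
    assert counting_sort([]) == []  # empty input is returned as-is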
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, expected_output_str)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
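
# Why left padding matters for decoder-only batched generation, as exercised above: with
# right padding the model would have to continue from pad positions, whereas left padding
# keeps every sequence's last real token at the end of the batch. A hedged, plain-Python
# sketch with illustrative values only:
if __name__ == "__main__":
    pad_id = 1
    batch = [[5, 6, 7, 8], [5, 6]]
    max_len = max(len(seq) for seq in batch)
    left_padded = [[pad_id] * (max_len - len(seq)) + seq for seq in batch]
    assert left_padded == [[5, 6, 7, 8], [1, 1, 5, 6]]  # new tokens are appended after position -1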
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build a half adder circuit on a quantum simulator and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
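
# A hedged classical cross-check of the logic the circuit above implements: the sum bit is
# XOR of the inputs and the carry bit is AND. This plain-Python sketch is illustrative only
# and does not touch qiskit.
def classical_half_adder(bit0, bit1):
    return bit0 ^ bit1, bit0 & bit1  # (sum, carry)


assert classical_half_adder(0, 0) == (0, 0)
assert classical_half_adder(0, 1) == (1, 0)
assert classical_half_adder(1, 0) == (1, 0)
assert classical_half_adder(1, 1) == (0, 1)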
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
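
# A small, hedged sanity check of the one-hot cross-entropy above (values illustrative):
# logits that put essentially all mass on the correct label should give a loss near zero.
if __name__ == "__main__":
    demo_logits = jnp.array([[10.0, -10.0, -10.0]])
    demo_labels = jnp.array([0])
    assert calculate_loss_for_nq(
        demo_logits, demo_labels, demo_logits, demo_labels, demo_logits, demo_labels
    ) < 1e-3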
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
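
# A hedged, self-contained sketch of the warmup-then-decay schedule built by scheduler_fn
# above: optax.join_schedules switches from the warmup ramp to the decay ramp at
# `warmup_steps`. The step values printed here are illustrative only.
if __name__ == "__main__":
    demo_lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
    for step in (0, 50, 100, 1000):
        print(f"step={step} lr={float(demo_lr(step)):.2e}")  # ramps up to 3e-5, then decays toward 1e-7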
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
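
# A hedged usage sketch: chaining the warpers above on a dummy batch of scores. The vocab
# size and values are illustrative only; in transformers these objects are normally built
# and invoked by the generation loop rather than called directly.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
    )
    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    dummy_scores = jnp.array([[1.0, 2.0, 3.0, 0.5]])
    warped = processors(dummy_input_ids, dummy_scores, cur_len=4)
    print(warped)  # everything outside the top-2 tokens is set to -inf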
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"


def load_notes_encoder(weights, model):
    _snake_case = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    _snake_case = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_snake_case = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight["attention"]
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
    _snake_case = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    _snake_case = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_snake_case = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_snake_case = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
    _snake_case = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    _snake_case = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    _snake_case = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
_snake_case = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_snake_case = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
_snake_case = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight["self_attention"]
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
_snake_case = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
_snake_case = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
_snake_case = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
_snake_case = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
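
# A hedged illustration of what @lru_cache buys the recursive definition above: repeated
# calls reuse memoized subresults, which cache_info() makes visible. Values are illustrative.
if __name__ == "__main__":
    factorial(10)
    print(factorial.cache_info())
    factorial(12)  # only computes 11 * ... * 12 on top of the cached factorial(10)
    print(factorial.cache_info())  # the hit count grows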
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law (V = I * R): given exactly two nonzero values, return the missing one."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
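
# A hedged usage sketch of the function above: pass exactly one zero and the missing
# quantity comes back, per V = I * R. The numbers are illustrative only.
if __name__ == "__main__":
    assert ohms_law(voltage=10, current=5, resistance=0) == {"resistance": 2.0}
    assert ohms_law(voltage=0, current=2, resistance=3) == {"voltage": 6.0}
    assert ohms_law(voltage=12, current=0, resistance=4) == {"current": 3.0}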
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b via the Jacobi iteration method for a strictly diagonally dominant A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the given augmented matrix is not strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
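
# A hedged worked example for jacobi_iteration_method above, using a small strictly
# diagonally dominant system (values illustrative): 4x + y = 2 and x + 5y = -6, whose
# exact solution is x = 16/19, y = -26/19.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0], [1.0, 5.0]])
    constant = np.array([[2.0], [-6.0]])
    approx = jacobi_iteration_method(coefficient, constant, init_val=[0, 0], iterations=25)
    assert np.allclose(approx, [16 / 19, -26 / 19], atol=1e-4)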
| 128
| 0
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 357
|
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
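# Usage sketch (illustrative values, not from the original file):
#   config = AutoformerConfig(prediction_length=24, context_length=48)
#   config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features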
| 133
| 0
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operators (carry propagation)."""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
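# How it works: `first & second` extracts the carry bits, `first ^= second`
# adds without carrying, and `c << 1` shifts the carries into position for the
# next round. Illustrative check (non-negative inputs): add(3, 5) == 8.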
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(F'{add(first, second) = }')
| 102
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens')

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.',
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
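    # Per the T5 convention, a single sequence is serialized as `X </s>` and a
    # pair of sequences as `A </s> B </s>` (no CLS/SEP tokens are used).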
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r'<extra_id_\d+>', x)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 2
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 221
|
from math import pi, sqrt
def gamma(num: float) -> float:
    """
    Evaluate the gamma function for positive integers and half-integers, using
    the base cases gamma(1) = 1 and gamma(0.5) = sqrt(pi) together with the
    recurrence gamma(x) = (x - 1) * gamma(x - 1).
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
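# Quick sanity facts for the recurrence above (standard gamma-function values):
#   gamma(5) == 24.0 (i.e. 4!) and gamma(1.5) == 0.5 * sqrt(pi)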
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 221
| 1
|
class Graph:
    def __init__(self):
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 52
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'})
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'})
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},)
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'})
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'})


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'})
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}''')
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'''New config: {config}''')

    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        })

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
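# Example invocation (illustrative flags; adjust paths and dataset to your setup):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo --do_train --do_eval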
if __name__ == "__main__":
main()
| 315
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, """spectrogram_length"""))
        self.assertTrue(hasattr(feature_extractor, """feature_size"""))
        self.assertTrue(hasattr(feature_extractor, """num_audio_channels"""))
        self.assertTrue(hasattr(feature_extractor, """hop_length"""))
        self.assertTrue(hasattr(feature_extractor, """chunk_length"""))
        self.assertTrue(hasattr(feature_extractor, """sampling_rate"""))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""")
        mel_second = dict_second.pop("""mel_filters""")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """feat_extract.json""")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("""mel_filters""")
        mel_second = dict_second.pop("""mel_filters""")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="""np""", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="""np""", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="""np""", sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="""np""", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""")
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""").select(range(num_samples))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="""pt""").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 293
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.''')

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="""max_length""", max_length=self.tokenizer.model_max_length, return_tensors="""pt""", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='''
                    f''' {type(prompt)}.''')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="""max_length""", max_length=max_length, truncation=True, return_tensors="""pt""", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="""cpu""", dtype=latents_dtype).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="""cpu""", dtype=latents_dtype).to(
                    self.device)
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype)
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="""pt""").to(
                self.device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 293
| 1
|
'''simple docstring'''
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()

device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''

model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('''generated.png''')
| 130
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 205
|
"""simple docstring"""
def one_pence():
    return 1
def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(x = 200):
    """Count the ways to make x pence from {1, 2, 5, 10, 20, 50, 100, 200} pence coins."""
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
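# Usage sketch (illustration only, not part of the original file): each helper
# counts the ways to make `x` pence from coins up to its own denomination, so
# solution(200) counts the combinations for two pounds. A small sanity check:
# solution(5) == 4, i.e. {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1}.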
| 205
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = torch.device("""cpu""")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
UpperCamelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 92
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Dict = logging.get_logger(__name__)
lowercase_ : Union[str, Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig( PretrainedConfig ):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 133
| 0
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__( self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
    def create_and_check_model( self, config, input_values, labels ):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self ):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_feature_extractor( self ):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification( self ):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 199
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence, start = None, end = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
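# Usage sketch (illustration only, not part of the original file): slowsort
# sorts in place and returns None.
# >>> seq = [5, 3, 1, 4, 2]
# >>> slowsort(seq)
# >>> seq
# [1, 2, 3, 4, 5]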
| 199
| 1
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward( self, x ):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook( ModelHook ):
    def pre_forward( self, module, *args, **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    def post_forward( self, module, output ):
        return output + 1
class HooksModelTester( unittest.TestCase ):
    def test_add_and_remove_hooks( self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, '_old_forward'))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, 'forward')
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ['x'])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, '_hf_hook'))
        self.assertFalse(hasattr(test_model, '_old_forward'))
    def test_append_and_remove_hooks( self ):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, '_old_forward'))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, 'forward')
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ['x'])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, '_hf_hook'))
        self.assertFalse(hasattr(test_model, '_old_forward'))
    def test_pre_forward_hook_is_executed( self ):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed( self ):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook( self ):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
    def test_align_devices_as_model_parallelism( self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload( self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        hook_kwargs = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs['execution_device'])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # Now test with buffers included in the offload
        hook_kwargs = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
    def test_attach_align_device_hook( self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
    def test_attach_align_device_hook_with_weight_map( self ):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True, )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('meta'))
        self.assertEqual(model.linear2.weight.device, torch.device('meta'))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device('meta'))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device('cpu'))
        self.assertEqual(model.batchnorm.weight.device, torch.device('cpu'))
        self.assertEqual(model.linear2.weight.device, torch.device('cpu'))
| 221
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
    def prepare_config_and_inputs_for_decoder( self ):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self, config, input_ids, input_mask ):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self, config, input_ids, input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self, config, input_ids, input_mask, token_labels ):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_labels ):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_labels ):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification( self, config, input_ids, input_mask, token_labels ):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, input_mask ):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs( self ):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    def test_model_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_model_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @unittest.skip(reason='Feed forward chunking is not implemented' )
    def test_feed_forward_chunking( self ):
        pass
    @parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self, scaling_type ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest( unittest.TestCase ):
    @slow
    def test_lm_generate_gptneox( self ):
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer('My favorite food is', return_tensors='pt' ).to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
| 221
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]
webbrowser.open(link)
| 243
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self, **kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 293
|
"""simple docstring"""
def different_signs(num1, num2) -> bool:
    """Return True iff num1 and num2 have opposite signs (the XOR of integers with differing sign bits is negative)."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
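# Usage sketch (illustration only, not part of the original file):
# >>> different_signs(1, -1)
# True
# >>> different_signs(1, 1)
# False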
| 293
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url = "https://www.worldometers.info/coronavirus" ):
    """Scrape the headline labels and counters from the Worldometers coronavirus page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
    for key, value in world_covid19_stats().items():
print(F'{key}\n{value}\n')
| 362
|
"""simple docstring"""
def solution(n = 2_00_00_00 ):
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
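# Usage sketch (illustration only, not part of the original file): the default
# bound is two million (Project Euler 10); a small bound is easy to verify by hand:
# >>> solution(10)
# 17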
| 215
| 0
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute( self, predictions, references ):
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
}
| 205
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 205
| 1
|
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3d point to a 2d drawable point using a perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotates a 3d point around the given axis ('x', 'y' or 'z') by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Scaled angle conversion, kept as in the original implementation.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 366
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
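# To try this checker outside the repository, a hypothetical stand-in for
# `good_file_paths` could walk the tree for Python sources, e.g.:
#
#     def good_file_paths(top: str = "."):
#         # Assumption for illustration: yield repo-relative .py paths.
#         for dirpath, _, filenames in os.walk(top):
#             for name in filenames:
#                 if name.endswith(".py"):
#                     yield os.path.join(dirpath, name)[2:]
#
# The real implementation lives in build_directory_md and may filter differently.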
| 205
| 0
|
def pancake_sort(arr: list) -> list:
    """Sorts a list using the pancake sort algorithm and returns the result."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (flip the maximum to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements (flip the maximum into place)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
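    # Quick sanity checks of the implementation above (values chosen arbitrarily).
    assert pancake_sort([3, 1, 2]) == [1, 2, 3]
    assert pancake_sort([10, -2, 7, 7]) == [-2, 7, 7, 10]
    assert pancake_sort([]) == []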
| 199
|
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence of two strings and returns
    its length together with the subsequence itself.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Trace back through the table to reconstruct one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    assert expected_ln == ln
    assert expected_subseq == subseq
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
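    # One more quick check; this pair has a unique longest common subsequence,
    # so the reconstructed string is deterministic.
    ln2, seq2 = longest_common_subsequence("ABC", "AC")
    assert (ln2, seq2) == (2, "AC")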
| 199
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
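# For context, `DummyObject` is a metaclass that makes the placeholder class
# fail loudly when used without the optional backend installed. A simplified
# sketch of the idea (not the library's exact implementation):
#
#     class DummyObject(type):
#         def __getattribute__(cls, key):
#             if key.startswith("_"):
#                 return super().__getattribute__(key)
#             requires_backends(cls, cls._backends)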
| 353
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Builds a datasets.Dataset (or IterableDataset) from a pyspark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        working_dir: Optional[str] = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
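# Hedged usage sketch: `Dataset.from_spark` is the public entry point that
# delegates to this reader (assumes a local Spark session is available).
#
#     from pyspark.sql import SparkSession
#     from datasets import Dataset
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset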
| 32
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box) -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict of int coordinates."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        return {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
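# Hedged usage sketch (assumes the OWL-ViT checkpoint
# "google/owlvit-base-patch32", which supports zero-shot object detection):
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     preds = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     for pred in preds:
#         print(pred["label"], round(pred["score"], 3), pred["box"])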
| 46
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    Estimates pi by throwing random points at the unit square and counting
    how many land inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area of the circle to the square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of the function over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Checks the estimator against the closed-form area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimates pi as the area under the curve y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
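    # Quick demonstration runs; larger iteration counts tighten the estimates
    # (Monte Carlo error shrinks roughly as 1/sqrt(iterations)).
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)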
| 243
| 0
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks primality using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns all left and right truncations of n (including n itself)."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick pre-filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` primes that stay prime under all truncations."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 192
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast SqueezeBERT tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its stored options disagree with
        # the arguments passed in here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
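# Hedged usage sketch (checkpoint names mirror the pretrained map above):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
#     encoded = tokenizer("Hello world!", return_tensors="pt")
#     print(encoded["input_ids"])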
| 192
| 1